# Decision graph (influence diagram) example in Python

# __author__ = 'Bayes Server'
# __version__= '0.4'

import jpype # pip install jpype1 (version 1.2.1 or later)
import jpype.imports
from jpype.types import *

classpath = "lib/bayesserver-10.8.jar" # TODO download the Bayes Server Java API, and adjust the path

# Launch the JVM.
# This must happen BEFORE the com.bayesserver imports below — jpype can only
# resolve Java packages once the JVM is running with the jar on the classpath.
jpype.startJVM(classpath=[classpath])

# import the Java modules
from com.bayesserver import *
from com.bayesserver.inference import *

# Uncomment the following line and change the license key, if you are using a licensed version
# License.validate("xxx")

# In this example we will first construct the well known
# Oil Wildcatter Decision Graph (Influence diagram) manually.
# We could instead use network.load(...) if we have an existing network.
# We will then use the Single Policy Updating algorithm to
# optimize decisions under uncertainty.

network = Network()

# Add standard probability (chance) nodes...

# 'Oil' — the unknown state of the site.
oilDry, oilWet, oilSoaking = State('Dry'), State('Wet'), State('Soaking')
oil = Variable('Oil', [oilDry, oilWet, oilSoaking])
nodeOil = Node(oil)

# 'Test Result' — the outcome observed if a seismic test is performed.
testResultClosed, testResultOpen, testResultDiffuse = (
    State('Closed'),
    State('Open'),
    State('Diffuse'),
)
testResult = Variable('Test Result', [testResultClosed, testResultOpen, testResultDiffuse])
nodeTestResult = Node(testResult)

# Add decision nodes...

# BUG FIX: the original code created the Yes/No State objects but never
# attached them to the decision variables, leaving 'Test?' and 'Drill?' with
# no states — yet later code indexes distributions by drillYes/drillNo.
# We attach them via the variable's state collection.
# NOTE(review): assumes Variable.getStates().add(...) is the attachment API —
# confirm against the Bayes Server Java API reference.

testYes = State('Yes')
testNo = State('No')
test = Variable('Test?', VariableValueType.DISCRETE, VariableKind.DECISION)
test.getStates().add(testYes)
test.getStates().add(testNo)
nodeTest = Node(test)

drillYes = State('Yes')
drillNo = State('No')
drill = Variable('Drill?', VariableValueType.DISCRETE, VariableKind.DECISION)
drill.getStates().add(drillYes)
drill.getStates().add(drillNo)
nodeDrill = Node(drill)

# Add utility nodes...

# Note that utility variables in Bayes Server are continuous. They can even have variances.

# Utility of the drilling decision (depends on Oil and Drill? via links added later).
drillUtility = Variable('Drill utility', VariableValueType.CONTINUOUS, VariableKind.UTILITY)
nodeDrillUtility = Node(drillUtility)

# Utility (cost) of performing the seismic test.
testUtility = Variable('Test utility', VariableValueType.CONTINUOUS, VariableKind.UTILITY)
nodeTestUtility = Node(testUtility)

# When a network has more than one utility node
# we need to add a further (leaf) utility node which
# both determines how the other utilities are to be combined
# and also provides a means of querying the maximum expected utility.
# We can even perform joint queries.

meu = Variable('MEU', VariableValueType.CONTINUOUS, VariableKind.UTILITY)
nodeMeu = Node(meu)

# Add the links.
# Each (parent, child) pair below becomes one directed link in the network;
# the order matches the classic Oil Wildcatter influence diagram.

links = network.getLinks()

link_pairs = [
    (nodeOil, nodeTestResult),
    (nodeOil, nodeDrillUtility),
    (nodeTestResult, nodeDrill),
    (nodeTest, nodeTestResult),
    (nodeTest, nodeDrill),
    (nodeTest, nodeTestUtility),
    (nodeDrill, nodeDrillUtility),
    (nodeDrillUtility, nodeMeu),
    (nodeTestUtility, nodeMeu),
]
for parent, child in link_pairs:
    links.add(Link(parent, child))

# Here we will manually specify the distributions
# but we could also learn them from data

# P(Oil) — prior over the state of the site.
tableOil = nodeOil.newDistribution().getTable()
tableOil.set(0.5, [oilDry])
tableOil.set(0.3, [oilWet])
tableOil.set(0.2, [oilSoaking])
# BUG FIX: newDistribution() creates but does not assign the distribution;
# each node needs an explicit setDistribution call or the network is invalid.
nodeOil.setDistribution(tableOil)

tableTestResult = nodeTestResult.newDistribution().getTable()

# We could set each value as we did for the previous distribution
# however because there are quite a few values we will use
# a table iterator

third = 1.0 / 3.0

# P(Test Result | Oil, Test?).
# BUG FIX: the original lines here were garbled (the TableIterator construction
# and the copyFrom call were lost); reconstructed from the surviving node list
# and value list. The uniform 'third' runs correspond to Test? = No, where the
# result is uninformative.
iteratorTestResult = TableIterator(tableTestResult, [nodeOil, nodeTest, nodeTestResult])
iteratorTestResult.copyFrom(
    [0.1, 0.3, 0.6, third, third, third,
     0.3, 0.4, 0.3, third, third, third,
     0.5, 0.4, 0.1, third, third, third])
nodeTestResult.setDistribution(tableTestResult)

# Decision node tables start uniform; Single Policy Updating optimizes them.
tableTest = nodeTest.newDistribution().getTable()
tableTest.normalize(True) # set to uniform distribution
nodeTest.setDistribution(tableTest)

tableDrill = nodeDrill.newDistribution().getTable()
tableDrill.normalize(True) # set to uniform distribution
nodeDrill.setDistribution(tableDrill)

# In the oil wildcatter example, all utilities have zero variance (point Gaussians)
# however Bayes Server supports utility distributions with variances.
# In fact, if you learn the distributions from data they will typically have
# non-zero variances.

# U(Drill utility | Oil, Drill?) — payoff of drilling given the site state.
gaussianDrillUtility = nodeDrillUtility.newDistribution()
gaussianDrillUtility.setMean(drillUtility, -70.0, [oilDry, drillYes])
gaussianDrillUtility.setMean(drillUtility, 0.0, [oilDry, drillNo])
gaussianDrillUtility.setMean(drillUtility, 50.0, [oilWet, drillYes])
gaussianDrillUtility.setMean(drillUtility, 0.0, [oilWet, drillNo])
gaussianDrillUtility.setMean(drillUtility, 200.0, [oilSoaking, drillYes])
gaussianDrillUtility.setMean(drillUtility, 0.0, [oilSoaking, drillNo])
# BUG FIX: newDistribution() does not assign — set it on the node explicitly.
nodeDrillUtility.setDistribution(gaussianDrillUtility)

# U(Test utility | Test?) — the seismic test costs 10.
gaussianTestUtility = nodeTestUtility.newDistribution()
gaussianTestUtility.setMean(testUtility, -10.0, [testYes])
gaussianTestUtility.setMean(testUtility, 0.0, [testNo])
nodeTestUtility.setDistribution(gaussianTestUtility)

# The MEU utility defines how the utilities are combined.
# In this example we just add them, by giving each parent a weight of 1
gaussianMeu = nodeMeu.newDistribution()
gaussianMeu.setWeight(meu, drillUtility, 1.0)
gaussianMeu.setWeight(meu, testUtility, 1.0)
nodeMeu.setDistribution(gaussianMeu)

# Now the network structure and distributions are fully specified

# Next, lets query the network.

factory = RelevanceTreeInferenceFactory()
inference = factory.createInferenceEngine(network)
queryOptions = factory.createQueryOptions()
queryOutput = factory.createQueryOutput()

# We want to optimize the decisions under uncertainty so will
# use the Single Policy Updating algorithm (SPU).
# BUG FIX: the original never actually selected SPU on the query options.
queryOptions.setDecisionAlgorithm(DecisionAlgorithm.SINGLE_POLICY_UPDATING)

queryOil = Table(oil) # query a probability variable
queryDrill = Table(drill) # query a decision variable
queryMeu = CLGaussian(meu) # get the Maximum Expected Utility (MEU)
queryJoint = CLGaussian([meu, oil]) # we can also query joint distributions.

# BUG FIX: the query distributions must be registered with the engine before
# query(...) is called, otherwise nothing is computed for them.
# NOTE(review): assumes the collection takes QueryDistribution wrappers as in
# the Java API — confirm against the Bayes Server reference.
queryDistributions = inference.getQueryDistributions()
queryDistributions.add(QueryDistribution(queryOil))
queryDistributions.add(QueryDistribution(queryDrill))
queryDistributions.add(QueryDistribution(queryMeu))
queryDistributions.add(QueryDistribution(queryJoint))

# If we have any evidence to set use
# inference.Evidence.Set or inference.Evidence.SetState
# here

inference.query(queryOptions, queryOutput)

# Report the optimized results.

probOilDry = queryOil.get([oilDry])
print('Oil = Dry\t{}'.format(probOilDry)) # expected 0.5

expectedUtility = queryMeu.getMean(meu)
print('MEU\t{}'.format(expectedUtility)) # expected value 22.5

probDrillYes = queryDrill.get([drillYes])
print('Drill? = Yes\t{}'.format(probDrillYes)) # expected 0.59

meuGivenOilDry = queryJoint.getMean(meu, [oilDry])
print('MEU Oil=Dry\t{}'.format(meuGivenOilDry)) # expected -38.0