Log-likelihood analysis in Python

# __author__ = 'Bayes Server'
# __version__= '0.1'

from jpype import *  # pip install jpype1   (for Java 7, use pip install jpype1==0.6.3 and remove convertStrings=False)

# TODO change path to Bayes Server jar file
classpath = 'C:\\Program Files\\Bayes Server\\Bayes Server 8.19\\API\\Java\\bayesserver-8.19.jar'

startJVM(getDefaultJVMPath(), '-Djava.class.path=%s' % classpath, convertStrings=False)

bayes = JPackage('com.bayesserver')
bayes_data = bayes.data
bayes_inference = bayes.inference
bayes_analysis = bayes.analysis

# TODO change path to Waste network
network_path = 'C:\\ProgramData\\Bayes Server 8.19\\Sample Networks\\Waste.bayes'

network = bayes.Network()
network.load(network_path)

variables = network.getVariables()

# discrete
burning_regimen = variables.get('Burning Regimen', True)
waste_type = variables.get('Waste type', True)
filter_state = variables.get('Filter state', True)

# continuous
filter_efficiency = variables.get('Filter efficiency', True)
dust_emission = variables.get('Dust emission', True)
metals_in_waste = variables.get('Metals in waste', True)
co2_concentration = variables.get('CO2 concentration', True)
light_penetrability = variables.get('Light penetrability', True)
metals_emission = variables.get('Metals emission', True)

# You can either create new evidence to analyze, as below, or use the evidence already
# set on an inference engine (inference.Evidence) if that is what you want to analyze

evidence = bayes_inference.DefaultEvidence(network)
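
# As an alternative (sketch only, not part of the original sample), you could analyze the
# evidence already attached to an inference engine, e.g. the relevance tree engine:
#
# factory = bayes_inference.RelevanceTreeInferenceFactory()
# inference = factory.createInferenceEngine(network)
# evidence = inference.getEvidence()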

# set some evidence
evidence.setState(burning_regimen.getStates().get("Unstable", True))
evidence.setState(filter_state.getStates().get("Defect", True))
evidence.set(filter_efficiency, java.lang.Double(-0.45))
evidence.set(light_penetrability, java.lang.Double(2.0))
evidence.set(metals_emission, java.lang.Double(6.3))

evidence_to_analyse = JArray(bayes.Variable)(evidence.size())  # or you can use [a, b, c] syntax (see Impact Analysis example)
evidence.getVariables(evidence_to_analyse)
evidence_to_analyse_list = java.util.Arrays.asList(evidence_to_analyse)
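
# As noted above, you could instead build the list directly with [a, b, c] syntax
# (see the Impact Analysis example); an untested sketch of that would be:
#
# evidence_to_analyse_list = java.util.Arrays.asList(
#     [burning_regimen, filter_state, filter_efficiency, light_penetrability, metals_emission])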

options = bayes_analysis.LogLikelihoodAnalysisOptions()
options.setSubsetMethod(bayes_analysis.LogLikelihoodAnalysisSubsetMethod.EXCLUDE)
options.setMaxEvidenceSubsetSize(1)
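
# With the EXCLUDE subset method and a maximum subset size of 1, each analysis item below
# reports the log-likelihood when a single piece of evidence is excluded (flag F in the
# output) while the remaining evidence (flag T) stays set.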

output = bayes_analysis.LogLikelihoodAnalysis.calculate(
    network,
    evidence,
    evidence_to_analyse_list,
    options)

print(f'None={output.getBaseline().getLogLikelihoodNone()}\tAll={output.getBaseline().getLogLikelihoodAll()}')
print()
print('\t'.join(map(lambda v: str(v.getName()), evidence_to_analyse)))

# In this example, we have chosen to order by descending log-likelihood, as we are using
# LogLikelihoodAnalysisSubsetMethod.Exclude and wish to understand which evidence
# causes the biggest decrease (e.g. for anomaly detection)

for item in sorted(output.getItems(), key=lambda i: i.getLogLikelihood(), reverse=True):

    flags = '\t'.join(map(lambda f: 'T' if f else 'F', item.getEvidenceFlags()))
    print(f'{item.getLogLikelihood()}\t{flags}')
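
# A possible extension (sketch, not part of the original sample): report which evidence
# was excluded for each item by pairing its flags with the analyzed variables, e.g.
#
# for item in output.getItems():
#     excluded = [str(v.getName()) for v, flag in zip(evidence_to_analyse, item.getEvidenceFlags()) if not flag]
#     print(f'{item.getLogLikelihood()}\texcluded={excluded}')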


#  Expected output:
#
#  None=0    All=-25.7131596279843
#
#  Burning Regimen    Filter state    Filter efficiency    Light penetrability    Metals emission
#  -14.1361346392178    T    T    T    F    T
#  -17.0198859514185    T    T    F    T    T
#  -20.9544632365106    F    T    T    T    T
#  -23.6826206567332    T    T    T    T    F
#  -25.7131596279843    T    F    T    T    T