Construction & inference in JavaScript

The following example is provided in both TypeScript and JavaScript.

TypeScript

import { Network, Node, State, Link, TableIterator, Table, RelevanceTreeInferenceFactory } from 'bayes-server';

// In this example we programmatically create a simple Bayesian network.

let network = new Network("Demo");

// add the nodes (variables)

let aTrue = new State("True");
let aFalse = new State("False");
let a = new Node("A", [aTrue, aFalse]);

let bTrue = new State("True");
let bFalse = new State("False");
let b = new Node("B", [bTrue, bFalse]);

let cTrue = new State("True");
let cFalse = new State("False");
let c = new Node("C", [cTrue, cFalse]);

let dTrue = new State("True");
let dFalse = new State("False");
let d = new Node("D", [dTrue, dFalse]);

network.nodes.push(a);
network.nodes.push(b);
network.nodes.push(c);
network.nodes.push(d);

// add some directed links

network.links.push(new Link(a, b));
network.links.push(new Link(a, c));
network.links.push(new Link(b, d));
network.links.push(new Link(c, d));

// at this point we have fully specified the structural (graphical) specification of the Bayesian Network.

// We must define the necessary probability distributions for each node.

// Each node in a Bayesian Network requires a probability distribution conditioned on its parents.

// NewDistribution() can be called on a Node to create the appropriate probability distribution for a node
// or it can be created manually.

// The interface IDistribution has been designed to represent both discrete and continuous variables,

// As we are currently dealing with discrete distributions, we will use the
// Table class.

// To access the discrete part of a distribution, we use IDistribution.Table.

// The Table class is used to define distributions over a number of discrete variables.

let tableA = a.newDistribution().table;     // access the table property of the Distribution

// IMPORTANT
// Note that calling Node.NewDistribution() does NOT assign the distribution to the node.
// A distribution cannot be assigned to a node until it is correctly specified.
// If a distribution becomes invalid (e.g. a parent node is added), it is automatically set to null.

tableA.set(0.1, [aTrue]);
tableA.set(0.9, [aFalse]);

// now tableA is correctly specified we can assign it to Node A;
a.distribution = tableA;


// node B has node A as a parent, therefore its distribution will be P(B|A)

let tableB = b.newDistribution().table;
tableB.set(0.2, [aTrue, bTrue]);
tableB.set(0.8, [aTrue, bFalse]);
tableB.set(0.15, [aFalse, bTrue]);
tableB.set(0.85, [aFalse, bFalse]);
b.distribution = tableB;


// specify P(C|A)
let tableC = c.newDistribution().table;
tableC.set(0.3, [aTrue, cTrue]);
tableC.set(0.7, [aTrue, cFalse]);
tableC.set(0.4, [aFalse, cTrue]);
tableC.set(0.6, [aFalse, cFalse]);
c.distribution = tableC;


// specify P(D|B,C)
let tableD = d.newDistribution().table;

// we could specify the values individually as above, or we can use a TableIterator as follows
let iteratorD = new TableIterator(tableD, [b, c, d]);
iteratorD.copyFrom([0.4, 0.6, 0.55, 0.45, 0.32, 0.68, 0.01, 0.99]);
d.distribution = tableD;


// The network is now fully specified

// If required the network can be saved...
// let result = network.saveToString();

// Now we will calculate P(A|D=True), i.e. the probability of A given the evidence that D is true

// use the factory design pattern to create the necessary inference related objects
let factory = new RelevanceTreeInferenceFactory();
let inference = factory.createInferenceEngine(network);
let queryOptions = factory.createQueryOptions();
let queryOutput = factory.createQueryOutput();

// we could have created these objects explicitly instead, but as the number of algorithms grows
// this makes it easier to switch between them

inference.evidence.setState(dTrue);  // set D = True

let queryA = new Table(a);
inference.queryDistributions.pushDistribution(queryA);
inference.query(queryOptions, queryOutput); // note that this can raise an exception (see help for details)

console.log("P(A|D=True) = {" + queryA.get([aTrue]) + "," + queryA.get([aFalse]) + "}.");

// Expected output ...
// P(A|D=True) = {0.0980748663101604,0.90192513368984}

// to perform another query we reuse all the objects

// now let's calculate P(A|D=True, C=True)
inference.evidence.setState(cTrue);

// we will also return the log-likelihood of the case
queryOptions.logLikelihood = true; // only request the log-likelihood if you really need it, as extra computation is involved

inference.query(queryOptions, queryOutput);
console.log(`P(A|D=True, C=True) = {${queryA.get([aTrue])},${queryA.get([aFalse])}}, log-likelihood = ${queryOutput.logLikelihood}.`);

// Expected output ...
// P(A|D=True, C=True) = {0.0777777777777778,0.922222222222222}, log-likelihood = -2.04330249506396.


// Note that we can also calculate joint queries such as P(A,B|D=True,C=True)

JavaScript (es2015)

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const bayes_server_1 = require("bayes-server");
// In this example we programatically create a simple Bayesian network.
let network = new bayes_server_1.Network("Demo");
// add the nodes (variables)
let aTrue = new bayes_server_1.State("True");
let aFalse = new bayes_server_1.State("False");
let a = new bayes_server_1.Node("A", [aTrue, aFalse]);
let bTrue = new bayes_server_1.State("True");
let bFalse = new bayes_server_1.State("False");
let b = new bayes_server_1.Node("B", [bTrue, bFalse]);
let cTrue = new bayes_server_1.State("True");
let cFalse = new bayes_server_1.State("False");
let c = new bayes_server_1.Node("C", [cTrue, cFalse]);
let dTrue = new bayes_server_1.State("True");
let dFalse = new bayes_server_1.State("False");
let d = new bayes_server_1.Node("D", [dTrue, dFalse]);
network.nodes.push(a);
network.nodes.push(b);
network.nodes.push(c);
network.nodes.push(d);
// add some directed links
network.links.push(new bayes_server_1.Link(a, b));
network.links.push(new bayes_server_1.Link(a, c));
network.links.push(new bayes_server_1.Link(b, d));
network.links.push(new bayes_server_1.Link(c, d));
// at this point we have fully specified the structural (graphical) specification of the Bayesian Network.
// We must define the necessary probability distributions for each node.
// Each node in a Bayesian Network requires a probability distribution conditioned on it's parents.
// NewDistribution() can be called on a Node to create the appropriate probability distribution for a node
// or it can be created manually.
// The interface IDistribution has been designed to represent both discrete and continuous variables,
// As we are currently dealing with discrete distributions, we will use the
// Table class.
// To access the discrete part of a distribution, we use IDistribution.Table.
// The Table class is used to define distributions over a number of discrete variables.
let tableA = a.newDistribution().table; // access the table property of the Distribution
// IMPORTANT
// Note that calling Node.NewDistribution() does NOT assign the distribution to the node.
// A distribution cannot be assigned to a node until it is correctly specified.
// If a distribution becomes invalid  (e.g. a parent node is added), it is automatically set to null.
tableA.set(0.1, [aTrue]);
tableA.set(0.9, [aFalse]);
// now tableA is correctly specified we can assign it to Node A;
a.distribution = tableA;
// node B has node A as a parent, therefore its distribution will be P(B|A)
let tableB = b.newDistribution().table;
tableB.set(0.2, [aTrue, bTrue]);
tableB.set(0.8, [aTrue, bFalse]);
tableB.set(0.15, [aFalse, bTrue]);
tableB.set(0.85, [aFalse, bFalse]);
b.distribution = tableB;
// specify P(C|A)
let tableC = c.newDistribution().table;
tableC.set(0.3, [aTrue, cTrue]);
tableC.set(0.7, [aTrue, cFalse]);
tableC.set(0.4, [aFalse, cTrue]);
tableC.set(0.6, [aFalse, cFalse]);
c.distribution = tableC;
// specify P(D|B,C)
let tableD = d.newDistribution().table;
// we could specify the values individually as above, or we can use a TableIterator as follows
let iteratorD = new bayes_server_1.TableIterator(tableD, [b, c, d]);
iteratorD.copyFrom([0.4, 0.6, 0.55, 0.45, 0.32, 0.68, 0.01, 0.99]);
d.distribution = tableD;
// The network is now fully specified
// If required the network can be saved...
// let result = network.saveToString();
// Now we will calculate P(A|D=True), i.e. the probability of A given the evidence that D is true
// use the factory design pattern to create the necessary inference related objects
let factory = new bayes_server_1.RelevanceTreeInferenceFactory();
let inference = factory.createInferenceEngine(network);
let queryOptions = factory.createQueryOptions();
let queryOutput = factory.createQueryOutput();
// we could have created these objects explicitly instead, but as the number of algorithms grows
// this makes it easier to switch between them
inference.evidence.setState(dTrue); // set D = True
let queryA = new bayes_server_1.Table(a);
inference.queryDistributions.pushDistribution(queryA);
inference.query(queryOptions, queryOutput); // note that this can raise an exception (see help for details)
console.log("P(A|D=True) = {" + queryA.get([aTrue]) + "," + queryA.get([aFalse]) + "}.");
// Expected output ...
// P(A|D=True) = {0.0980748663101604,0.90192513368984}
// to perform another query we reuse all the objects
// now lets calculate P(A|D=True, C=True)
inference.evidence.setState(cTrue);
// we will also return the log-likelihood of the case
queryOptions.logLikelihood = true; // only request the log-likelihood if you really need it, as extra computation is involved
inference.query(queryOptions, queryOutput);
console.log((`P(A|D=True, C=True) = [${queryA.get([aTrue])},${queryA.get([aFalse])}], log-likelihood = ${queryOutput.logLikelihood}.`));
// Expected output ...
// P(A|D=True, C=True) = {0.0777777777777778,0.922222222222222}, log-likelihood = -2.04330249506396.
// Note that we can also calculate joint queries such as P(A,B|D=True,C=True)