Skip to content

Instantly share code, notes, and snippets.

@apete

apete/.classpath Secret

Last active December 23, 2024 06:49
Show Gist options
  • Save apete/b3278dc2f8c2db6a00369c211ba321db to your computer and use it in GitHub Desktop.
Save apete/b3278dc2f8c2db6a00369c211ba321db to your computer and use it in GitHub Desktop.
ojAlgo-examples
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path=""/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-11">
<attributes>
<attribute name="module" value="true"/>
</attributes>
</classpathentry>
<classpathentry combineaccessrules="false" kind="src" path="/ojAlgo"/>
<classpathentry combineaccessrules="false" kind="src" path="/ojAlgo-rocksdb"/>
<classpathentry kind="output" path=""/>
</classpath>
*.class
*.prefs
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>ojAlgo-examples</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.matrix.MatrixR064;
import org.ojalgo.matrix.MatrixR064.DenseReceiver;
import org.ojalgo.matrix.decomposition.SingularValue;
import org.ojalgo.matrix.store.RawStore;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.Uniform;
/**
 * An example with common mistake(s) seen in code using ojAlgo.
 *
 * @see <a href="https://www.ojalgo.org/2021/08/common-mistake/">Common mistake (ojAlgo blog)</a>
 */
public class CommonMistake {

    // Shared uniform [0,1) random number generator used to fill the example matrices.
    private static final Uniform RANDOM = Uniform.standard();

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(CommonMistake.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        /*
         * Just some initial matrix.
         */
        MatrixR064 initialMatrix = MatrixR064.FACTORY.makeFilled(7, 9, RANDOM);

        /*
         * This line is copied from a GitHub repository using ojAlgo. It has several problems and, I believe,
         * demonstrates some things that are not obvious to new users.
         */
        MatrixR064 selectedColumns = MatrixR064.FACTORY.rows(initialMatrix.logical().limits(-1, 5).get().toRawCopy2D());

        /*
         * Further on in the code an SVD was calculated using the matrix of those selected columns.
         */
        SingularValue<Double> svd = SingularValue.R064.make(selectedColumns);
        svd.decompose(selectedColumns);

        /*
         * Let's do this in steps to see what actually happens
         */

        /*
         * This used to create a thin wrapper exposing additional "logical" API - without copying. Now this
         * method is deprecated and entirely redundant as it simply returns "this".
         */
        MatrixR064 logical = initialMatrix.logical();

        /*
         * A thin wrapper specifying that the number of columns should be limited to 5. (The rows limit is set
         * to -1 resulting in "no limitation". That's an alternative to setting the rows limit to the
         * full/actual number of rows.)
         */
        MatrixR064 limited = logical.limits(-1, 5);

        /*
         * Previously used to get the "matrix" back from the "logical builder" – now redundant and simply
         * returns "this".
         */
        MatrixR064 gotten = limited.get();

        /*
         * This extracts the elements as a 2D double array. That's another copy.
         */
        double[][] raw2D = gotten.toRawCopy2D();

        /*
         * This builds the final matrix. A third copy.
         */
        selectedColumns = MatrixR064.FACTORY.rows(raw2D);

        /*
         * I hope it's clear that extracting the double[][] was completely unnecessary resulting in 2
         * additional copies. Try to stop thinking in terms of double[][] ! It will prevent you from doing
         * that kind of thing. Instead simply...
         */
        selectedColumns = gotten;

        /*
         * And if you really do want another copy there is a more direct way to do it.
         */
        selectedColumns = MatrixR064.FACTORY.copy(gotten);

        /*
         * The SVD instance has its own internal storage. When you perform a decomposition another copy is
         * created.
         */
        svd.decompose(gotten);

        /*
         * The ojAlgo design/feature set allows to get rid of the explicit copy used to calculate the SVD. A
         * selected columns copy needs to be created somehow, but that copy can be made directly to the
         * internal storage of the SVD instance. Simply feed the thin wrapper representing the columns
         * selection of the original matrix to the SVD instance.
         */
        svd.decompose(limited);

        /*
         * This, original, code...
         */
        MatrixR064 unnecessary = MatrixR064.FACTORY.rows(initialMatrix.logical().limits(-1, 5).get().toRawCopy2D());
        svd.decompose(unnecessary);

        /*
         * ...is equivalent to this. That is less to type, less to execute, and less (essentially no) data
         * copying.
         */
        svd.decompose(initialMatrix.limits(-1, 5));

        /*
         * Prior to v50 you had to do it this way:
         */
        svd.decompose(initialMatrix.logical().limits(-1, 5));

        /*
         * Thinking in terms of double[][] will cause developers to create matrices this way.
         */
        double[][] newData = new double[7][9];
        for (int i = 0; i < newData.length; i++) {
            for (int j = 0; j < newData[i].length; j++) {
                newData[i][j] = RANDOM.doubleValue();
            }
        }
        MatrixR064 newMatrix = MatrixR064.FACTORY.rows(newData);

        /*
         * The correct, ojAlgo, way to do it would be something like this. (Apart from being less code, this
         * avoids that intermediate copy.)
         */
        newMatrix = MatrixR064.FACTORY.makeFilled(7, 9, RANDOM);

        /*
         * Since this example is using the immutable MatrixR064 class, manually/explicitly setting the
         * elements needs to be done using an intermediate mutable receiver. (Don't worry, there's no
         * unnecessary copying.)
         */
        DenseReceiver receiver = MatrixR064.FACTORY.newDenseBuilder(7, 9);
        receiver.fillAll(RANDOM);
        newMatrix = receiver.get();
        svd.decompose(newMatrix);

        /*
         * If you don't need the new matrix afterwards you can do it even more directly. Actually "getting"
         * the matrix can be handled internally by the SVD instance.
         */
        svd.decompose(receiver);

        /*
         * Integrating with other tools or existing code it is common to have data already in a double[][]. To
         * create an ojAlgo data structure this still does not need to be copied. There exists an ojAlgo class
         * that uses double[][] internally and allows wrapping existing data.
         */
        RawStore wrapped = RawStore.wrap(raw2D);
        svd.decompose(wrapped);
    }
}
import static org.ojalgo.function.constant.PrimitiveMath.QUARTER;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.matrix.MatrixR064;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.Normal;
import org.ojalgo.random.Uniform;
import org.ojalgo.type.Stopwatch;
/**
 * An example that, using matrix multiplication, demonstrates how to control ojAlgo's thread usage. To show
 * the effects of the various alternatives the example measures, and displays, execution times. What you
 * should do is run this example yourself and have the Activity Monitor or Task Manager open so that you can
 * see how many threads/cores are working on your machine. (If you do that you'll want to increase the matrix
 * size.)
 *
 * @see <a href="https://www.ojalgo.org/2019/08/controlling-concurrency/">Controlling concurrency (ojAlgo blog)</a>
 */
public class ControllingConcurrency {

    // Row/column dimension of the square matrices that are multiplied.
    private static final int MATRIX_DIM = 1000;

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(ControllingConcurrency.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        /*
         * Create 2 random matrices, large enough
         */
        MatrixR064 factorLeft = MatrixR064.FACTORY.makeFilled(MATRIX_DIM, MATRIX_DIM, Normal.standard());
        MatrixR064 factorRight = MatrixR064.FACTORY.makeFilled(MATRIX_DIM, MATRIX_DIM, Uniform.standard());

        /*
         * A runnable task that executes the multiplication
         */
        Runnable multiplication = () -> factorLeft.multiply(factorRight);
        multiplication.run(); // Just some warm-up

        BasicLogger.debug();
        BasicLogger.debug("The 'environment' that ojAlgo detected and adapts to");
        BasicLogger.debug(OjAlgoUtils.ENVIRONMENT);

        /*
         * Execute that task and measure how long it takes to complete. (NB: 'meassure' is the actual,
         * misspelled, method name in the ojAlgo Stopwatch API.)
         */
        BasicLogger.debug();
        BasicLogger.debug("Execution time (no configuration): {}", Stopwatch.meassure(multiplication));

        /*
         * ojAlgo can be limited to only "see" part of the available cores/threads.
         */
        OjAlgoUtils.limitEnvironmentBy(QUARTER);

        BasicLogger.debug();
        BasicLogger.debug("The, now limited, environment that ojAlgo will adapt to");
        BasicLogger.debug(OjAlgoUtils.ENVIRONMENT);
        BasicLogger.debug("CPU units: {}", OjAlgoUtils.ENVIRONMENT.units);
        BasicLogger.debug("CPU cores: {}", OjAlgoUtils.ENVIRONMENT.cores);
        BasicLogger.debug("CPU threads: {}", OjAlgoUtils.ENVIRONMENT.threads);

        /*
         * Execute the multiplication task again
         */
        BasicLogger.debug();
        BasicLogger.debug("Execution time (limited environment): {}", Stopwatch.meassure(multiplication));

        /*
         * Individual operations each have a concurrency threshold that can be modified. Doing this is
         * normally not recommended/needed - it's for fine-tuning ojAlgo. There is a utility method that will
         * push up all thresholds to a specified minimum value. This can be useful in some cases, but the
         * alternative to "limit the environment" is preferable in most cases. Here we use that utility method
         * and set the minimum threshold to the dimension of the matrices we created. That effectively means
         * nothing will be forked off.
         */
        OjAlgoUtils.pushUpConcurrencyThresholds(MATRIX_DIM);

        /*
         * Execute the multiplication task yet again
         */
        BasicLogger.debug();
        BasicLogger.debug("Execution time (limited environment and high thresholds): {}", Stopwatch.meassure(multiplication));
    }
}
import static org.ojalgo.function.constant.PrimitiveMath.ONE;
import static org.ojalgo.function.constant.PrimitiveMath.ZERO;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.matrix.decomposition.QR;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.matrix.store.PhysicalStore;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.matrix.store.TransformableRegion;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.Normal;
import org.ojalgo.type.Stopwatch;
/**
 * Creating custom matrix stores exploiting special structures.
 *
 * @see <a href="https://www.ojalgo.org/2020/09/sparse-and-special-structure-matrices/">Sparse and special structure matrices (ojAlgo blog)</a>
 * @see <a href="https://github.com/optimatika/ojAlgo/wiki/Create-Custom-MatrixStore">Create Custom MatrixStore (ojAlgo wiki)</a>
 */
public class CreateCustomMatrixStore {

    /**
     * In addition to the required 4 methods, you probably should also implement these. All overrides below
     * exploit the fact that this "matrix" is an identity block (possibly padded with zero rows/columns).
     */
    static class BetterPerformingEye extends MostBasicEye {

        public BetterPerformingEye(final int rowsCount, final int columnsCount) {
            super(rowsCount, columnsCount);
        }

        /**
         * For a primitive valued implementation, at least, you should implement this method
         */
        @Override
        public double doubleValue(final int row, final int col) {
            return row == col ? ONE : ZERO;
        }

        /**
         * May allow the standard matrix multiplication code to exploit structure, certainly possible here.
         * The only (potentially) nonzero element of column 'col' is on row 'col'.
         */
        @Override
        public int firstInColumn(final int col) {
            return Math.min(col, this.getRowDim());
        }

        /**
         * @see #firstInColumn(int)
         */
        @Override
        public int firstInRow(final int row) {
            return Math.min(row, this.getColDim());
        }

        /**
         * @see #firstInColumn(int)
         */
        @Override
        public int limitOfColumn(final int col) {
            return Math.min(col + 1, this.getRowDim());
        }

        /**
         * @see #firstInColumn(int)
         */
        @Override
        public int limitOfRow(final int row) {
            return Math.min(row + 1, this.getColDim());
        }

        /**
         * Custom/optimised copy functionality: clear the receiver, then write only the diagonal.
         */
        @Override
        public void supplyTo(final TransformableRegion<Double> receiver) {
            receiver.reset();
            receiver.fillDiagonal(ONE);
        }
    }

    /**
     * There are only 4 methods you have to implement in a custom MatrixStore, but there's a whole lot you can
     * do with that implementation. It's fully functional!
     */
    static class MostBasicEye implements MatrixStore<Double> {

        private final int myColumnsCount;
        private final int myRowsCount;

        public MostBasicEye(final int rowsCount, final int columnsCount) {
            super();
            myRowsCount = rowsCount;
            myColumnsCount = columnsCount;
        }

        @Override
        public Double get(final int row, final int col) {
            return row == col ? ONE : ZERO;
        }

        @Override
        public int getColDim() {
            return myColumnsCount;
        }

        @Override
        public int getRowDim() {
            return myRowsCount;
        }

        @Override
        public PhysicalStore.Factory<Double, R064Store> physical() {
            return R064Store.FACTORY;
        }
    }

    // Fixed benchmark dimension - declared final since it is a constant, never reassigned.
    private static final int DIM = 2000;

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(CreateCustomMatrixStore.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        MatrixStore<Double> mostBasicEye = new MostBasicEye(DIM + DIM, DIM);
        MatrixStore<Double> identityAndZero = R064Store.FACTORY.makeIdentity(DIM).below(DIM);
        MatrixStore<Double> betterPerformingEye = new BetterPerformingEye(DIM + DIM, DIM);

        Stopwatch stopwatch = new Stopwatch();

        BasicLogger.debug();
        BasicLogger.debug("Compare multiplication");

        PhysicalStore<Double> right = R064Store.FACTORY.makeFilled(DIM, DIM, new Normal());
        PhysicalStore<Double> product = R064Store.FACTORY.make(DIM + DIM, DIM);

        stopwatch.reset();
        mostBasicEye.multiply(right, product);
        BasicLogger.debug("MostBasicEye multiplied in {}", stopwatch.stop());

        stopwatch.reset();
        identityAndZero.multiply(right, product);
        BasicLogger.debug("Built-in ojAlgo multiplied in {}", stopwatch.stop());

        stopwatch.reset();
        betterPerformingEye.multiply(right, product);
        BasicLogger.debug("BetterPerformingEye multiplied in {}", stopwatch.stop());

        BasicLogger.debug();
        BasicLogger.debug("Compare QR decomposition");

        QR<Double> decompQR = QR.R064.make(mostBasicEye);

        stopwatch.reset();
        decompQR.compute(mostBasicEye);
        BasicLogger.debug("MostBasicEye decomposed in {}", stopwatch.stop());

        stopwatch.reset();
        decompQR.compute(identityAndZero);
        BasicLogger.debug("Built-in ojAlgo decomposed in {}", stopwatch.stop());

        stopwatch.reset();
        decompQR.compute(betterPerformingEye);
        BasicLogger.debug("BetterPerformingEye decomposed in {}", stopwatch.stop());

        // Actually decomposing should take exactly the same time for the 3 alternatives.
        // ...and the input is already in the decomposed form.
        // The time difference is the time it takes to copy the input matrix
        // to the decomposer's internal storage.
    }
}
import java.io.File;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.data.cluster.Point;
import org.ojalgo.netio.ASCII;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.netio.FromFileReader;
import org.ojalgo.netio.TextLineReader;
import org.ojalgo.type.CalendarDateUnit;
import org.ojalgo.type.Stopwatch;
/**
 * Example how to use ojAlgo's clustering capabilities. This example uses the Mall_Customers.csv data set from
 * Kaggle.
 *
 * @see <a href="https://www.ojalgo.org/2024/12/mall-customer-segmentation/">Mall customer segmentation (ojAlgo blog)</a>
 * @see <a href="https://www.kaggle.com/datasets/vjchoudhary7/customer-segmentation-tutorial-in-python/data">Kaggle data set</a>
 */
public class CustomerSegmentation {

    /**
     * One row of the Mall_Customers.csv file. Immutable value holder - all fields are final and assigned
     * once in the constructor.
     */
    static final class MallCustomer {

        /**
         * Accepts data lines only: non-empty/ok lines that start with a digit (skips the CSV header).
         */
        static boolean filter(final String line) {
            return TextLineReader.isLineOK(line) && ASCII.isDigit(line.charAt(0));
        }

        /**
         * Parses one CSV data line. Assumes the fixed 5-column layout of Mall_Customers.csv with no
         * quoted/embedded commas - a simple split is sufficient for this data set.
         */
        static MallCustomer parse(final String line) {

            String[] parts = line.split(",");

            int customerID = Integer.parseInt(parts[0]);
            boolean gender = "Male".equals(parts[1]);
            int age = Integer.parseInt(parts[2]);
            int annualIncome = Integer.parseInt(parts[3]);
            int spendingScore = Integer.parseInt(parts[4]);

            return new MallCustomer(customerID, gender, age, annualIncome, spendingScore);
        }

        /**
         * Age
         */
        final int age;
        /**
         * Annual Income (k$)
         */
        final int annualIncome;
        /**
         * CustomerID
         */
        final int customerID;
        /**
         * Gender (Male=true=10, Female=false=0)
         */
        final boolean gender;
        /**
         * Spending Score (1-100)
         */
        final int spendingScore;

        MallCustomer(final int customerID, final boolean gender, final int age, final int annualIncome, final int spendingScore) {
            super();
            this.customerID = customerID;
            this.gender = gender;
            this.age = age;
            this.annualIncome = annualIncome;
            this.spendingScore = spendingScore;
        }
    }

    static final File MALL_CUSTOMERS_CSV = new File("/Users/apete/Developer/data/kaggle/Mall_Customers.csv");
    static final Stopwatch STOPWATCH = new Stopwatch();

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(CustomerSegmentation.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        STOPWATCH.reset();

        try (FromFileReader<MallCustomer> reader = TextLineReader.of(MALL_CUSTOMERS_CSV).withFilteredParser(MallCustomer::filter, MallCustomer::parse)) {

            List<MallCustomer> customers = reader.stream().collect(Collectors.toList());

            BasicLogger.debug("Number of customers: {}", customers.size());

            /*
             * Here's how to do the clustering
             */

            /*
             * 1) Convert whatever data type you have to a list of Point:s. What you need to provide is a
             * function that converts each (relevant) attribute to a float, and returns an array of those
             * measurements. This is a very important step; it's what you control, and what you need to get
             * right.
             */
            List<Point> points = Point.convert(customers,
                    customer -> new float[] { customer.gender ? 10 : 0, customer.age, customer.annualIncome, customer.spendingScore });

            /*
             * 2) Call the cluster method of the Point class. It will return a list of clusters, where each
             * cluster is a set of Point:s.
             */
            List<Set<Point>> clusters = Point.cluster(points);

            /*
             * That's it! Now let's see what we got.
             */
            clusters.sort(Comparator.comparing(Set<Point>::size).reversed());

            BasicLogger.debug();
            BasicLogger.debug("Complete data set");
            BasicLogger.debug("============================================");
            CustomerSegmentation.describe(points);

            BasicLogger.debug("Clusters (ordered by decreasing size)");
            BasicLogger.debug("============================================");
            for (Set<Point> cluster : clusters) {
                CustomerSegmentation.describe(cluster);
            }

        } catch (Exception cause) {
            throw new RuntimeException(cause);
        }

        BasicLogger.debug("Done: {}", STOPWATCH.stop(CalendarDateUnit.MILLIS));
    }

    /**
     * Logs the size and the mean (centroid) of one collection of points.
     */
    static void describe(final Collection<Point> cluster) {
        BasicLogger.debug("Size: {}", cluster.size());
        BasicLogger.debug("Average: {}", Point.mean(cluster));
        BasicLogger.debug();
    }
}
import static org.ojalgo.function.constant.PrimitiveMath.DIVIDE;
import static org.ojalgo.function.constant.PrimitiveMath.SUBTRACT;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.matrix.decomposition.QR;
import org.ojalgo.matrix.store.ElementsSupplier;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.matrix.store.TransformableRegion;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.Normal;
import org.ojalgo.random.Uniform;
/**
 * A small example to show what can be done with {@link ElementsSupplier}s and {@link TransformableRegion}s.
 * To really "see" the benefit of using these you should take this example, scale up the matrix dimensions and
 * extend the loop. Then run the code in a profiler and monitor memory allocation and garbage collection.
 *
 * @see <a href="https://www.ojalgo.org/2021/08/common-mistake/">Common mistake (ojAlgo blog)</a>
 * @see <a href="https://github.com/optimatika/ojAlgo/wiki/Element-Consumers-and-Suppliers">Element Consumers and Suppliers (ojAlgo wiki)</a>
 */
public class ElementConsumersAndSuppliers {

    // Number of loop iterations - increase to see allocation behaviour in a profiler.
    static int ITERATIONS = 3;
    // Multiplier for all matrix dimensions - increase to make the matrices "gigantic".
    static int MATRIX_SIZE_SCALE = 1;

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(ElementConsumersAndSuppliers.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        Normal normalDistribution = Normal.standard();
        Uniform uniformDistribution = Uniform.standard();

        // Assume you have the matrices [A],[B],[C] and [D]
        R064Store mtrxA = R064Store.FACTORY.make(5 * MATRIX_SIZE_SCALE, 7 * MATRIX_SIZE_SCALE);
        R064Store mtrxB = R064Store.FACTORY.make(7 * MATRIX_SIZE_SCALE, 9 * MATRIX_SIZE_SCALE);
        R064Store mtrxC = R064Store.FACTORY.make(5 * MATRIX_SIZE_SCALE, 9 * MATRIX_SIZE_SCALE);
        R064Store mtrxD = R064Store.FACTORY.make(9 * MATRIX_SIZE_SCALE, 5 * MATRIX_SIZE_SCALE);

        QR<Double> decompQR = QR.R064.make();

        // Imagine the matrices involved to be gigantic and the iterations (loop) many
        for (int l = 1; l <= ITERATIONS; l++) {

            // Inside this loop no array/vector/matrix memory is allocated,
            // only thin wrappers and placeholders

            BasicLogger.debug();
            BasicLogger.debug("Iteration {}", l);

            mtrxA.fillAll(uniformDistribution);
            mtrxB.fillAll(normalDistribution);

            // [E] = [A][B]
            ElementsSupplier<Double> placeholderE = mtrxB.premultiply(mtrxA);

            // Note that the multiplication is not yet performed - it's a placeholder for a deferred operation.
            BasicLogger.debug("The matrix [E] = [A][B] will have {} rows and {} columns.", placeholderE.countRows(), placeholderE.countColumns());

            mtrxC.fillAll(uniformDistribution);

            // [F] = [A][B] - [C]
            ElementsSupplier<Double> placeholderF = placeholderE.onMatching(SUBTRACT, mtrxC);

            // Still, nothing is actually done - no calculations and no memory/array allocation.

            // [G] = [F]t
            ElementsSupplier<Double> placeholderG = placeholderF.transpose();

            // Still nothing...

            // [H] = [G] ./ 2.0 (divide each element by 2.0)
            ElementsSupplier<Double> placeholderH = placeholderG.onAll(DIVIDE.by(2.0));

            // Still nothing...
            BasicLogger.debug("The matrix [H] will have {} rows and {} columns.", placeholderH.countRows(), placeholderH.countColumns());

            // Now it happens all at once!
            placeholderH.supplyTo(mtrxD);

            // ...and you get to decide where to put the results.
            if (placeholderH.count() <= 100) {
                BasicLogger.debugMatrix("Resulting D", mtrxD);
            }

            // You can supply it directly to a decomposition's internal storage
            // (as you decompose it)
            decompQR.decompose(placeholderH);

            // The matrix that was decomposed never "existed" other than as an
            // intermediate state of the matrix decomposition
            // The elements of the intermediate placeholder H was supplied/written
            // directly to the decomposition's internal storage, and then further
            // worked on to perform the actual decomposition.

            BasicLogger.debug();
            BasicLogger.debug("Is it full rank? {}", decompQR.isFullRank());
        }
    }
}
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.SIGMOID;
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.SOFTMAX;
import static org.ojalgo.function.constant.PrimitiveMath.DIVIDE;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.ann.ArtificialNeuralNetwork;
import org.ojalgo.ann.NetworkInvoker;
import org.ojalgo.ann.NetworkTrainer;
import org.ojalgo.array.ArrayAnyD;
import org.ojalgo.concurrent.DaemonPoolExecutor;
import org.ojalgo.concurrent.Parallelism;
import org.ojalgo.data.DataBatch;
import org.ojalgo.data.batch.BatchManager;
import org.ojalgo.data.batch.BatchNode;
import org.ojalgo.data.image.ImageData;
import org.ojalgo.matrix.store.R032Store;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.netio.DataInterpreter;
import org.ojalgo.netio.IDX;
import org.ojalgo.netio.ToFileWriter;
import org.ojalgo.random.FrequencyMap;
import org.ojalgo.structure.Access2D;
import org.ojalgo.structure.AccessAnyD.MatrixView;
import org.ojalgo.type.CalendarDateUnit;
import org.ojalgo.type.Stopwatch;
import org.ojalgo.type.format.NumberStyle;
import org.ojalgo.type.function.TwoStepMapper;
/**
* This example program does two things:
* <ol>
* <li>Demonstrates basic usage of the BatchNode utility
* <li>Train a neural network on the Fashion-MNIST dataset from Zalando Research
* (https://github.com/zalandoresearch/fashion-mnist)
* <p>
* BatchNode is meant to be used with very very large datasets. The dataset used in this example is not very
* large at all, and we don't always do things the most efficient way. Just want to show a few different ways
* to use BatchNode.
*
* @see https://www.ojalgo.org/2022/05/introducing-batchnode/
*/
public class FashionMNISTWithBatchNode {
/**
* A simple consumer that encapsulates a, batched, neural network trainer. We can have several of these
* working concurrently.
*/
static class ConcurrentTrainer implements Consumer<DataAndLabelPair> {
private static final AtomicInteger COUNTER = new AtomicInteger();
private final DataBatch myInputBatch;
private final DataBatch myOutputBatch;
private final NetworkTrainer myTrainer;
ConcurrentTrainer(final ArtificialNeuralNetwork network, final int batchSize) {
super();
myTrainer = network.newTrainer(batchSize).rate(0.005).dropouts();
myInputBatch = myTrainer.newInputBatch();
myOutputBatch = myTrainer.newOutputBatch();
}
@Override
public void accept(final FashionMNISTWithBatchNode.DataAndLabelPair item) {
myInputBatch.addRow(item.data);
// The label is an integer [0,9] representing the digit in the image
// That label is used as the index to set a single 1.0
myOutputBatch.addRowWithSingleUnit(item.label);
if (myInputBatch.isFull()) {
myTrainer.train(myInputBatch, myOutputBatch);
myInputBatch.reset();
myOutputBatch.reset();
}
int iterations = COUNTER.incrementAndGet();
if (iterations % 1_000_000 == 0) {
BasicLogger.debug("Done {} training iterations: {}", iterations, STOPWATCH.stop(CalendarDateUnit.SECOND));
}
}
}
/**
* A simple class representing what is stored at each of the batch nodes. In this example it happens so
* that we store the same type of data at all the nodes. Usually that would not be case. It's more likely
* there is a unique type for each node.
*/
static class DataAndLabelPair {
/**
* This is what we feed the {@link BatchNode} builder so that the node knows how to read/write data
* from disk.
*/
static final DataInterpreter<DataAndLabelPair> INTERPRETER = new DataInterpreter<>() {
@Override
public FashionMNISTWithBatchNode.DataAndLabelPair deserialize(final DataInput input) throws IOException {
int nbRows = input.readInt();
int nbCols = input.readInt();
R032Store data = R032Store.FACTORY.make(nbRows, nbCols);
for (int i = 0; i < nbRows; i++) {
for (int j = 0; j < nbCols; j++) {
data.set(i, j, input.readFloat());
}
}
int label = input.readInt();
return new DataAndLabelPair(data, label);
}
@Override
public void serialize(final FashionMNISTWithBatchNode.DataAndLabelPair pair, final DataOutput output) throws IOException {
int nbRows = pair.data.getRowDim();
int nbCols = pair.data.getColDim();
output.writeInt(nbRows);
output.writeInt(nbCols);
for (int i = 0; i < nbRows; i++) {
for (int j = 0; j < nbCols; j++) {
output.writeFloat(pair.data.floatValue(i, j));
}
}
output.writeInt(pair.label);
}
};
/**
* Training data - the image
*/
R032Store data;
/**
* The correct/expected category
*/
int label;
DataAndLabelPair(final Access2D<Double> data, final int label) {
super();
if (data instanceof R032Store) {
this.data = (R032Store) data;
} else {
this.data = R032Store.FACTORY.copy(data);
}
this.label = label;
}
}
static final AtomicInteger INCREMENTOR = new AtomicInteger();
static final String[] LABELS = { "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot" };
static final File OUTPUT_TEST_IMAGES = new File("/Users/apete/Developer/data/images/test/");
static final File OUTPUT_TRAINING_IMAGES = new File("/Users/apete/Developer/data/images/training/");
static final Stopwatch STOPWATCH = new Stopwatch();
static final File TEMP_BATCH_DIR = new File("/Users/apete/Developer/data/temp/batch/");
static final File TEST_IMAGES = new File("/Users/apete/Developer/data/fashion/t10k-images-idx3-ubyte.gz");
static final File TEST_LABELS = new File("/Users/apete/Developer/data/fashion/t10k-labels-idx1-ubyte.gz");
static final File TRAINING_IMAGES = new File("/Users/apete/Developer/data/fashion/train-images-idx3-ubyte.gz");
static final File TRAINING_LABELS = new File("/Users/apete/Developer/data/fashion/train-labels-idx1-ubyte.gz");
public static void main(final String[] args) throws IOException {
BasicLogger.debug();
BasicLogger.debug(FashionMNISTWithBatchNode.class);
BasicLogger.debug(OjAlgoUtils.getTitle());
BasicLogger.debug(OjAlgoUtils.getDate());
BasicLogger.debug();
int numberToPrint = 10;
boolean generateImages = false;
int epochs = 128;
int batchSize = 100;
/*
* Using a BatchManager is entirely optional. It just makes it a bit simpler to create multiple
* BatchNode instances with common configurations.
*/
BatchManager batchManager = new BatchManager(TEMP_BATCH_DIR); // Specifying a temp dir for all the node data
batchManager.executor(DaemonPoolExecutor.newCachedThreadPool("Batch Worker")); // The thread pool used when processing data
batchManager.fragmentation(epochs); // The number of shards - we'll make use of the fact that it matches the epochs
batchManager.parallelism(Parallelism.CORES); // The number of worker threads
batchManager.queue(1024); // Capacity of the queues used when reading/writing to disk
/*
* Using BatchManager is optional, and also all of those configurations have usable defaults.
*/
ArrayAnyD<Double> trainingLabels = IDX.parse(TRAINING_LABELS);
ArrayAnyD<Double> trainingImages = IDX.parse(TRAINING_IMAGES);
BasicLogger.debug("Parsed IDX training data files: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* Declaring a node to store the initial data. We just specify the name of a subdirectory and how to
* "interpret" its data.
*/
BatchNode<DataAndLabelPair> initialData = batchManager.newNodeBuilder("initial", DataAndLabelPair.INTERPRETER).build();
/*
* Write to that initial data node
*/
try (ToFileWriter<DataAndLabelPair> initialDataWriter = initialData.newWriter()) {
for (MatrixView<Double> imageMatrix : trainingImages.matrices()) {
long imageIndex = imageMatrix.index();
int label = trainingLabels.intValue(imageIndex);
DataAndLabelPair pair = new DataAndLabelPair(imageMatrix, label);
initialDataWriter.write(pair);
if (generateImages) {
FashionMNISTWithBatchNode.generateImage(imageMatrix, label, OUTPUT_TRAINING_IMAGES);
}
}
} catch (Exception cause) {
throw new RuntimeException(cause);
}
BasicLogger.debug("Initial training data: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* Need to scale the matrices that make out the image. The range [0,255] should be scaled to [0,1] to
* be used as input to the neural network.
*/
BatchNode<DataAndLabelPair> scaledData = batchManager.newNodeBuilder("scaled", DataAndLabelPair.INTERPRETER).build();
try (ToFileWriter<DataAndLabelPair> scaledDataWriter = scaledData.newWriter()) {
initialData.processAll(item -> { // Read inital data
item.data.modifyAll(DIVIDE.by(255)); // Divide by 255
scaledDataWriter.write(item); // Write scaled data
});
} catch (Exception cause) {
throw new RuntimeException(cause);
}
BasicLogger.debug("Scaled training data: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* As a (wasteful) way to enable training on multiple epochs we'll create a dataset of multiple copies
* of the original.
*/
BatchNode<DataAndLabelPair> duplicatedData = batchManager.newNodeBuilder("duplicated", DataAndLabelPair.INTERPRETER)
.distributor(item -> INCREMENTOR.getAndIncrement()).build();
try (ToFileWriter<DataAndLabelPair> duplicatedDataWriter = duplicatedData.newWriter()) {
scaledData.processAll(item -> { // Read once
for (int e = 0; e < epochs; e++) { // Write 'epochs' times
duplicatedDataWriter.write(item);
} // Because of how the distributor works, and because the number
}); // of shards match the number of epochs, this will write once to each shard.
} catch (Exception cause) {
throw new RuntimeException(cause);
}
BasicLogger.debug("Duplicated training data: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* Training works better if we shuffle the data randomly. When we read the data it is essentially read
* sequeltially from the shards (a few are worked on in parallel) but we write to all shards
* simultaneously using the distributor to decide to which shard an individual data item is sent. The
* default distributor is random.
*/
BatchNode<DataAndLabelPair> randomisedData = batchManager.newNodeBuilder("randomised", DataAndLabelPair.INTERPRETER).build();
try (ToFileWriter<DataAndLabelPair> randomisedDataWriter = randomisedData.newWriter()) {
duplicatedData.processAll(randomisedDataWriter::write);
} catch (Exception cause) {
throw new RuntimeException(cause);
}
BasicLogger.debug("Randomised training data: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* Now we have a dataset that is scaled, duplicated (many times) and suffled randomly. Maybe we should
* verify it somehow. Let's just count the occurrences of the different labels. There should be an
* equal amount of each and it should be 60,000 x 128 / 10 = 768,000.
*/
/*
* This will count the keys/labels in each of the shards, and then combine the results to a single
* returned FrequencyMap.
*/
FrequencyMap<String> frequencyMap = randomisedData.reduceByMerging(() -> new TwoStepMapper.KeyCounter<>(pair -> LABELS[pair.label]));
for (int i = 0; i < LABELS.length; i++) {
BasicLogger.debug("There are {} {} instances in the scaled/duplicated/randomised traing set.", frequencyMap.getFrequency(LABELS[i]), LABELS[i]);
}
BasicLogger.debug(frequencyMap.sample());
BasicLogger.debug("Training data verified: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* This is exactly the same network structure as used in the example with the original MNIST dataset.
*/
ArtificialNeuralNetwork network = ArtificialNeuralNetwork.builder(28 * 28).layer(200, SIGMOID).layer(10, SOFTMAX).get();
/*
* We need to have 1 trainer per worker thread, so we suplly a factory rather than a direct consumer.
* Internally the BatchNode will create 1 ConcurrentTrainer per worker thread.
*/
randomisedData.processAll(() -> new ConcurrentTrainer(network, batchSize));
BasicLogger.debug("Training done: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
/*
* We have deliberately kept all node data rather than disposing as we're done with them. Set a break
* point here if you want to inspect the (full) file structure on disk. With this one call we dispose
* of everything.
*/
batchManager.dispose();
/*
* Now we need to test how the neural network performs. We wont use BatchNode for this - just do it
* the easiest most direct way.
*/
ArrayAnyD<Double> testLabels = IDX.parse(TEST_LABELS);
ArrayAnyD<Double> testImages = IDX.parse(TEST_IMAGES);
testImages.modifyAll(DIVIDE.by(255));
BasicLogger.debug("Parsed IDX test data files: {}", STOPWATCH.stop(CalendarDateUnit.SECOND));
NetworkInvoker invoker = network.newInvoker();
int correct = 0;
int wrong = 0;
for (MatrixView<Double> imageData : testImages.matrices()) {
int expected = testLabels.intValue(imageData.index());
int actual = Math.toIntExact(invoker.invoke(imageData).indexOfLargest());
if (actual == expected) {
correct++;
} else {
wrong++;
}
if (imageData.index() < numberToPrint) {
BasicLogger.debug("");
BasicLogger.debug("Image {}: {} <=> {}", imageData.index(), LABELS[expected], LABELS[actual]);
IDX.print(imageData, BasicLogger.DEBUG);
}
if (generateImages) {
FashionMNISTWithBatchNode.generateImage(imageData, expected, OUTPUT_TEST_IMAGES);
}
}
BasicLogger.debug("Done: {} or {}", STOPWATCH.stop(CalendarDateUnit.SECOND), STOPWATCH.stop(CalendarDateUnit.MINUTE));
BasicLogger.debug("");
BasicLogger.debug("===========================================================");
BasicLogger.debug("Error rate: {}", (double) wrong / (double) (correct + wrong));
}
/**
 * Writes a single dataset image to disk as a PNG file.
 * <p>
 * The file name is the zero-padded dataset index followed by the numeric label, so the
 * generated files sort naturally in the output directory.
 *
 * @param imageData one image from the dataset, viewed as a matrix of pixel values in [0,1]
 * @param imageLabel the numeric label of that image (becomes part of the file name)
 * @param directory the output directory (created if it does not already exist)
 * @throws IOException if the directory cannot be created or the file cannot be written
 */
static void generateImage(final MatrixView<Double> imageData, final long imageLabel, final File directory) throws IOException {

    ToFileWriter.mkdirs(directory);

    int numberOfRows = imageData.getRowDim();
    int numberOfCols = imageData.getColDim();

    // IDX-files and ojAlgo data structures are indexed differently. That doesn't
    // matter when doing the math, but the data must be transposed when creating
    // an image meant for viewing.
    ImageData image = ImageData.newGreyScale(numberOfCols, numberOfRows);

    for (int row = 0; row < numberOfRows; row++) {
        for (int col = 0; col < numberOfCols; col++) {
            // The colours are stored inverted in the IDX-files (255 means "ink" and
            // 0 means "no ink", while in computer graphics 255 usually means "white"
            // and 0 "black"). In addition the pixel values were previously scaled to
            // [0,1] - hence the inversion and rescaling here.
            double greyScaleValue = 255.0 * (1.0 - imageData.doubleValue(row, col));
            image.set(col, row, greyScaleValue);
        }
    }

    String fileName = NumberStyle.toUniformString(imageData.index(), 60_000) + "_" + imageLabel + ".png";
    image.writeTo(new File(directory, fileName));
}
}
import java.time.LocalDate;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.data.domain.finance.series.DataSource;
import org.ojalgo.data.domain.finance.series.YahooSession;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.SampleSet;
import org.ojalgo.random.process.GeometricBrownianMotion;
import org.ojalgo.random.process.RandomProcess.SimulationResults;
import org.ojalgo.series.BasicSeries;
import org.ojalgo.series.primitive.CoordinatedSet;
import org.ojalgo.series.primitive.PrimitiveSeries;
import org.ojalgo.type.CalendarDateUnit;
import org.ojalgo.type.PrimitiveNumber;
/**
* An example demonstrating how to download historical financial time series data, and how you can continue
* working with it.
*
* @see https://www.ojalgo.org/2019/04/ojalgo-v47-1-ojalgo-finance-v2-1-financial-time-series-data/
* @see https://github.com/optimatika/ojAlgo/wiki/Financial-Data
*/
public abstract class FinancialData {
// NOTE(review): this program downloads data over the network (AlphaVantage and Yahoo
// data sources, see below) - it needs connectivity to run.
public static void main(final String[] args) {
BasicLogger.debug();
BasicLogger.debug(FinancialData.class);
BasicLogger.debug(OjAlgoUtils.getTitle());
BasicLogger.debug(OjAlgoUtils.getDate());
BasicLogger.debug();
// First create the data sources
// NOTE(review): "demo" is presumably AlphaVantage's public demo API key - replace
// with a real key for anything beyond this example; verify against their docs.
DataSource sourceMSFT = DataSource.newAlphaVantage("MSFT", CalendarDateUnit.DAY, "demo");
// Different Yahoo data sources should share a common session
YahooSession yahooSession = new YahooSession();
DataSource sourceAAPL = DataSource.newYahoo(yahooSession, "AAPL", CalendarDateUnit.DAY);
DataSource sourceIBM = DataSource.newYahoo(yahooSession, "IBM", CalendarDateUnit.DAY);
DataSource sourceORCL = DataSource.newYahoo(yahooSession, "ORCL", CalendarDateUnit.DAY);
// Fetch the data (daily sources, resampled to monthly series)
BasicSeries<LocalDate, PrimitiveNumber> seriesIBM = sourceIBM.getLocalDateSeries(CalendarDateUnit.MONTH);
BasicSeries<LocalDate, PrimitiveNumber> seriesORCL = sourceORCL.getLocalDateSeries(CalendarDateUnit.MONTH);
BasicSeries<LocalDate, PrimitiveNumber> seriesMSFT = sourceMSFT.getLocalDateSeries(CalendarDateUnit.MONTH);
BasicSeries<LocalDate, PrimitiveNumber> seriesAAPL = sourceAAPL.getLocalDateSeries(CalendarDateUnit.MONTH);
BasicLogger.debug("Range for {} is from {} to {}", seriesMSFT.getName(), seriesMSFT.firstKey(), seriesMSFT.lastKey());
BasicLogger.debug("Range for {} is from {} to {}", seriesAAPL.getName(), seriesAAPL.firstKey(), seriesAAPL.lastKey());
BasicLogger.debug("Range for {} is from {} to {}", seriesIBM.getName(), seriesIBM.firstKey(), seriesIBM.lastKey());
BasicLogger.debug("Range for {} is from {} to {}", seriesORCL.getName(), seriesORCL.firstKey(), seriesORCL.lastKey());
// Coordinate the series - common start, end and frequency
CoordinatedSet<LocalDate> coordinationSet = CoordinatedSet.from(seriesMSFT, seriesAAPL, seriesIBM, seriesORCL);
BasicLogger.debug();
BasicLogger.debug("Common range is from {} to {}", coordinationSet.getFirstKey(), coordinationSet.getLastKey());
// The coordinated series come back in the same order they were passed to from(...)
PrimitiveSeries primitiveMSFT = coordinationSet.getSeries(0);
PrimitiveSeries primitiveAAPL = coordinationSet.getSeries(1);
PrimitiveSeries primitiveIBM = coordinationSet.getSeries(2);
PrimitiveSeries primitiveORCL = coordinationSet.getSeries(3);
// Create sample sets of log differences
SampleSet sampleSetMSFT = SampleSet.wrap(primitiveMSFT.log().differences());
SampleSet sampleSetIBM = SampleSet.wrap(primitiveIBM.log().differences());
BasicLogger.debug();
BasicLogger.debug("Sample statistics (logarithmic differences on monthly data)");
BasicLogger.debug("MSFT: {}", sampleSetMSFT);
BasicLogger.debug("IBM: {}", sampleSetIBM);
BasicLogger.debug("Correlation: {}", sampleSetIBM.getCorrelation(sampleSetMSFT));
// Estimate stochastic process parameters based on historical data
GeometricBrownianMotion monthlyProcAAPL = GeometricBrownianMotion.estimate(primitiveAAPL, 1.0);
GeometricBrownianMotion monthlyProcORCL = GeometricBrownianMotion.estimate(primitiveORCL, 1.0);
monthlyProcAAPL.setValue(1.0); // To normalize the current value
monthlyProcORCL.setValue(1.0);
// 1/12 expressed in years (see the simulation description further down)
double yearsPerMonth = CalendarDateUnit.YEAR.convert(CalendarDateUnit.MONTH);
// The same thing, but annualised
GeometricBrownianMotion annualProcAAPL = GeometricBrownianMotion.estimate(primitiveAAPL, yearsPerMonth);
GeometricBrownianMotion annualProcORCL = GeometricBrownianMotion.estimate(primitiveORCL, yearsPerMonth);
annualProcAAPL.setValue(1.0); // To normalize the current value
annualProcORCL.setValue(1.0);
// Comparing the monthly and annual stochastic processes for AAPL
// (6 months = 6.0 in monthly units = 0.5 in annual units)
BasicLogger.debug();
BasicLogger.debug(" Apple Monthly proc Annual proc (6 months from now)");
BasicLogger.debug("Expected: {} {}", monthlyProcAAPL.getDistribution(6.0).getExpected(), annualProcAAPL.getDistribution(0.5).getExpected());
BasicLogger.debug("StdDev: {} {}", monthlyProcAAPL.getDistribution(6.0).getStandardDeviation(),
annualProcAAPL.getDistribution(0.5).getStandardDeviation());
BasicLogger.debug("Var: {} {}", monthlyProcAAPL.getDistribution(6.0).getVariance(), annualProcAAPL.getDistribution(0.5).getVariance());
// Comparing the annualized stochastic processes for AAPL and ORCL
BasicLogger.debug();
BasicLogger.debug(" Apple Oracle (1 year from now)");
BasicLogger.debug("Current: {} {}", annualProcAAPL.getValue(), annualProcORCL.getValue());
// getExpected() is a shortcut to getDistribution(1.0).getExpected()
BasicLogger.debug("Expected: {} {}", annualProcAAPL.getExpected(), annualProcORCL.getExpected());
// getStandardDeviation() is a shortcut to getDistribution(1.0).getStandardDeviation()
BasicLogger.debug("StdDev: {} {}", annualProcAAPL.getStandardDeviation(), annualProcORCL.getStandardDeviation());
// getVariance() is a shortcut to getDistribution(1.0).getVariance()
BasicLogger.debug("Var: {} {}", annualProcAAPL.getVariance(), annualProcORCL.getVariance());
// Simulate future scenarios for ORCL
SimulationResults simulationResults = annualProcORCL.simulate(1000, 12, yearsPerMonth);
BasicLogger.debug();
BasicLogger.debug("Simulate future Oracle: 1000 scenarios, take 12 incremental steps of size 'yearsPerMonth' (1/12)");
// There are 12 sample sets indexed 0 to 11
BasicLogger.debug("Simulated sample set: {}", simulationResults.getSampleSet(11));
// There are 1000 scenarios indexed 0 to 999
BasicLogger.debug("Simulated scenario: {}", simulationResults.getScenario(999));
// There is a shortcut to get a coordinated set
DataSource.Coordinated coordinated = DataSource.coordinated(CalendarDateUnit.DAY);
coordinated.addAlphaVantage("MSFT", "demo");
coordinated.addYahoo("AAPL");
coordinated.addYahoo("IBM");
coordinated.addYahoo("ORCL");
// NOTE(review): coordinationSet2 is deliberately unused - the point of these last
// lines is only to demonstrate the shortcut API.
CoordinatedSet<LocalDate> coordinationSet2 = coordinated.get();
}
}
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.matrix.decomposition.Eigenvalue;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.netio.BasicLogger;
/**
* An example that outlines how to solve Generalised Symmetric Definite Eigenproblems
*
* @see https://www.ojalgo.org/2019/08/generalised-eigenvalue-problems/
*/
public class GeneralisedEigenvalueProblem {

    /**
     * Sets up a random generalised symmetric definite eigenproblem, solves it, and prints both
     * sides of the defining equation [A][V] = [B][V][D] so they can be compared visually.
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(GeneralisedEigenvalueProblem.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        int dimension = 5;

        // Two random Symmetric Positive Definite matrices to act as [A] and [B]
        R064Store matrixA = R064Store.FACTORY.makeSPD(dimension);
        R064Store matrixB = R064Store.FACTORY.makeSPD(dimension);

        // There are several generalisations; 3 are supported by ojAlgo, specified by the
        // enum Eigenvalue.Generalisation. This factory method returns the most common
        // alternative: [A][V] = [B][V][D]
        Eigenvalue.Generalised<Double> evd = Eigenvalue.R064.makeGeneralised(matrixA);

        // Option 1: the 2-args methods (use one of them, not both)
        evd.computeValuesOnly(matrixA, matrixB);
        evd.decompose(matrixA, matrixB);

        // Option 2: prepare with [B] first, then use the usual 1-arg methods. The
        // preparation can be reused for repeated calls - handy when [B] stays fixed
        // while [A] changes.
        evd.prepare(matrixB);
        evd.computeValuesOnly(matrixA); // either
        evd.decompose(matrixA); // or

        MatrixStore<Double> eigenvectors = evd.getV(); // eigenvectors in the columns
        MatrixStore<Double> eigenvalues = evd.getD(); // eigenvalues on the diagonal

        // Reconstruct the left and right hand sides of the eigenproblem - they should match
        MatrixStore<Double> leftHandSide = matrixA.multiply(eigenvectors);
        MatrixStore<Double> rightHandSide = matrixB.multiply(eigenvectors).multiply(eigenvalues);

        BasicLogger.debug();
        BasicLogger.debug("Generalised Eigenproblem [A][V] = [B][V][D]");
        BasicLogger.debugMatrix("[A]", matrixA);
        BasicLogger.debugMatrix("[B]", matrixB);
        BasicLogger.debugMatrix("Eigenvectors - [V]", eigenvectors);
        BasicLogger.debugMatrix("Eigenvalues - [D]", eigenvalues);
        BasicLogger.debugMatrix("Left - [A][V]", leftHandSide);
        BasicLogger.debugMatrix("Right - [B][V][D]", rightHandSide);
    }
}
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.RecoverableCondition;
import org.ojalgo.matrix.MatrixR064;
import org.ojalgo.matrix.decomposition.QR;
import org.ojalgo.matrix.store.ElementsSupplier;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.matrix.store.PhysicalStore;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.matrix.store.RawStore;
import org.ojalgo.matrix.task.InverterTask;
import org.ojalgo.matrix.task.SolverTask;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.Weibull;
/**
* Getting Started with ojAlgo
*
* @see https://www.ojalgo.org/2019/03/linear-algebra-introduction/
* @see https://github.com/optimatika/ojAlgo/wiki/Getting-Started
*/
public class GettingStarted {
public static void main(final String[] args) {
BasicLogger.debug();
BasicLogger.debug(GettingStarted.class);
BasicLogger.debug(OjAlgoUtils.getTitle());
BasicLogger.debug(OjAlgoUtils.getDate());
BasicLogger.debug();
MatrixR064.Factory matrixFactory = MatrixR064.FACTORY;
PhysicalStore.Factory<Double, R064Store> storeFactory = R064Store.FACTORY;
// PrimitiveMatrix.Factory and PrimitiveDenseStore.Factory are very similar.
// Every factory in ojAlgo that makes 2D-structures
// extends/implements the same interface.
MatrixR064 matrixA = matrixFactory.makeEye(5, 5);
// Internally this creates an "eye-structure" - not a large array.
R064Store storeA = storeFactory.makeEye(5, 5);
// A PrimitiveDenseStore is always a "full array". No smart data
// structures here.
MatrixR064 matrixB = matrixFactory.makeFilled(5, 3, new Weibull(5.0, 2.0));
R064Store storeB = storeFactory.makeFilled(5, 3, new Weibull(5.0, 2.0));
// When you create a matrix with random elements you can specify
// their distribution.
/* Matrix multiplication */
MatrixR064 matrixC = matrixA.multiply(matrixB);
// Multiplying two PrimitiveMatrix:s is trivial. There are no
// alternatives, and the returned product is a PrimitiveMatrix
// (same as the inputs).
// Doing the same thing using PrimitiveDenseStore (MatrixStore) you
// have options...
BasicLogger.debug("Different ways to do matrix multiplication with " + "MatrixStore:s");
BasicLogger.debug();
MatrixStore<Double> storeC = storeA.multiply(storeB);
// One option is to do exactly what you did with PrimitiveMatrix.
// The only difference is that the return type is MatrixStore rather
// than PhysicalStore, PrimitiveDenseStore or whatever else you input.
BasicLogger.debugMatrix("MatrixStore MatrixStore#multiply(MatrixStore)", storeC);
R064Store storeCPreallocated = storeFactory.make(5, 3);
// Another option is to first create the matrix that should hold the
// resulting product,
storeA.multiply(storeB, storeCPreallocated);
// and then perform the multiplication. This enables reusing memory
// (the product matrix).
BasicLogger.debugMatrix("void MatrixStore#multiply(Access1D, ElementsConsumer)", storeCPreallocated);
ElementsSupplier<Double> storeCSupplier = storeB.premultiply(storeA);
// A third option is the premultiply method:
// 1) The left and right argument matrices are interchanged.
// 2) The return type is an ElementsSupplier rather than
// a MatrixStore.
// This is because the multiplication is not yet performed.
// It is possible to define additional operation on
// an ElementsSupplier.
ElementsSupplier<Double> storeCLater = storeCSupplier;
// The multiplication, and whatever additional operations you defined,
// is performed when you call #get().
BasicLogger.debug("ElementsSupplier MatrixStore#premultiply(Access1D)", storeCLater);
// A couple of variations that will do the same thing.
storeCPreallocated.fillByMultiplying(storeA, storeB);
// NOTE(review): the next two debug calls log storeCLater (the supplier), not the
// freshly filled storeCPreallocated - confirm that is the intended output.
BasicLogger.debug("void ElementsConsumer#fillByMultiplying(Access1D, Access1D)", storeCLater);
storeCSupplier.supplyTo(storeCPreallocated);
BasicLogger.debug("void ElementsSupplier#supplyTo(ElementsConsumer)", storeCLater);
/* Inverting matrices */
matrixA.invert();
// If you want to invert a PrimitiveMatrix then just do it...
// The matrix doesn't even have to be square or anything.
// You'll get a pseudoinverse or whatever is possible.
// NOTE(review): the returned inverse is discarded here - the call only
// demonstrates the API.
// With MatrixStore:s you need to use an InverterTask
InverterTask<Double> inverter = InverterTask.R064.make(storeA);
// There are many implementations of that interface. This factory
// method will return one that may be suitable, but most likely you
// will want to choose implementation based on what you know about
// the matrix.
try {
inverter.invert(storeA);
} catch (RecoverableCondition e) {
// Will throw an exception if inversion fails, rethrowing it.
throw new RuntimeException(e);
}
/* Solving equation system */
matrixA.solve(matrixC);
// PrimitiveMatrix "is" the equation system body.
// You just supply the RHS, and you get the solution.
// If necessary it will be a least squares solution, or something
// derived from the pseudoinverse.
// NOTE(review): as with invert() above, the returned solution is discarded.
SolverTask<Double> solver = SolverTask.R064.make(storeA, storeC);
// Similar to InverterTask:s there are SolverTask:s for the lower level stuff
try {
solver.solve(storeA, storeC);
} catch (RecoverableCondition e) {
// Will throw an exception if solving fails, rethrowing it.
throw new RuntimeException(e);
}
// Most likely you want to do is to instantiate some matrix
// decomposition (there are several). Using InverterTask or SolverTask will
// do this for you, but you may want to choose which kind of decomposition is used.
QR<Double> qr = QR.R064.make(storeA);
// You supply a typical matrix to the factory to allow it to choose implementation
// (depending on the size/shape).
qr.decompose(storeA);
// Then you do the decomposition
if (!qr.isSolvable()) {
throw new RuntimeException("Cannot solve the equation system");
}
// You should verify that the equation system is solvable,
// and do something else if it is not.
qr.getSolution(storeC);
/* Setting individual elements */
storeA.set(3, 1, 3.14);
storeA.set(3, 0, 2.18);
// PhysicalStore instances are naturally mutable. If you want to set
// or modify something - just do it
MatrixR064.DenseReceiver matrixBuilder = matrixA.copy();
// PrimitiveMatrix is immutable. To modify anything, you need to copy
// it to builder instance.
matrixBuilder.add(3, 1, 3.14);
matrixBuilder.add(3, 0, 2.18);
matrixA = matrixBuilder.get();
/* Creating matrices by explicitly setting all elements */
double[][] data = { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 }, { 7.0, 8.0, 9.0 } };
matrixFactory.copy(RawStore.wrap(data));
storeFactory.copy(RawStore.wrap(data));
// You don't want/need to first create some (intermediate) array
// for the elements, you can of course set them on the matrix
// directly.
R064Store storeZ = storeFactory.makeEye(3, 3);
// Since PrimitiveMatrix is immutable this has to be done via
// a builder.
MatrixR064.DenseReceiver matrixZBuilder = matrixFactory.newDenseBuilder(3, 3);
for (int j = 0; j < 3; j++) {
for (int i = 0; i < 3; i++) {
matrixZBuilder.set(i, j, i * j);
storeZ.set(i, j, i * j);
}
}
MatrixR064 matrixZ = matrixZBuilder.get();
BasicLogger.debug();
BasicLogger.debugMatrix("PrimitiveMatrix Z", matrixZ);
}
}
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.Variable;
import org.ojalgo.optimisation.integer.GomorySolver;
import org.ojalgo.type.context.NumberContext;
/**
* Demonstration of Gomory Mixed Integer (GMI) cuts using a MIP solver entirely implemented using these cuts
* (no branch&bound).
* <p>
* In addition this example shows how to use a non-standard (3:d party) solver as well as how to set some
* options.
*
* @see https://www.ojalgo.org/2022/04/gomory-mixed-integer-cuts/
*/
class GomoryMixedIntegerCuts {
public static void main(final String[] args) {
BasicLogger.debug();
BasicLogger.debug(GomoryMixedIntegerCuts.class);
BasicLogger.debug(OjAlgoUtils.getTitle());
BasicLogger.debug(OjAlgoUtils.getDate());
BasicLogger.debug();
/*
* A simple example taken from Branch-and-Cut Algorithms for Combinatorial Optimization Problems, by
* John E. Mitchell
*/
ExpressionsBasedModel model = new ExpressionsBasedModel();
/*
* http://eaton.math.rpi.edu/faculty/Mitchell/papers/bc_hao.pdf
*/
// Maximise 6*x1 + 5*x2 expressed as minimising the negated weights
Variable x1 = model.newVariable("x1").integer().lower(0).weight(-6);
Variable x2 = model.newVariable("x2").integer().lower(0).weight(-5);
// The two constraints: 3*x1 + x2 <= 11 and -x1 + 2*x2 <= 5
model.addExpression().upper(11).set(x1, 3).set(x2, 1);
model.addExpression().upper(5).set(x1, -1).set(x2, 2);
BasicLogger.debug("The model to solve");
BasicLogger.debug(model);
/*
* In that paper this example is solved using a combination of branching and cutting. Here we'll do it
* using cuts only.
*/
/*
* The GomorySolver is (in theory) capable of solving any MIP model. In practice it often runs in to
* numerical difficulties progressing very slowly, or not at all. It is not part of the standard set
* of solvers used by ExpressionsBasedModel. If we want to use it we need to explicitly add its
* integration.
*/
// NOTE(review): addIntegration is called on the class, not the instance - it
// presumably registers the solver globally for all models; confirm before relying
// on it in code that builds multiple models.
ExpressionsBasedModel.addIntegration(GomorySolver.INTEGRATION);
/*
* This solver is typically not as accurate as the IntegerSolver normally used, so we'll adjust the
* 'feasibility' and 'solution' options.
*/
model.options.feasibility = NumberContext.of(8);
model.options.solution = NumberContext.of(8);
/*
* To get some progress output
*/
model.options.progress(GomorySolver.class);
BasicLogger.debug();
BasicLogger.debug("Solve with progress output");
BasicLogger.debug();
BasicLogger.debug(model.minimise());
/*
* Solved it in 3 iterations! That means it generated 2 sets of cuts, 1 set after each time the
* iteration did not yield an integer solution. After the 3:d iteration the solution was considered
* integer enough (determined by the 'feasibility' option).
*/
/*
* To get more detailed debug output
*/
model.options.debug(GomorySolver.class);
BasicLogger.debug();
BasicLogger.debug();
BasicLogger.debug("Solve again with more detailed debug output");
BasicLogger.debug();
BasicLogger.debug(model.minimise());
BasicLogger.debug();
BasicLogger.debug(model);
/*
* With that debug output you see the exact cuts generated. The first set contains 2 cuts, and the
* second just 1.
*/
}
}
import java.io.File;
import java.io.IOException;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.data.image.ImageData;
import org.ojalgo.function.constant.PrimitiveMath;
import org.ojalgo.matrix.store.GenericStore;
import org.ojalgo.matrix.store.PhysicalStore;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.scalar.ComplexNumber;
import org.ojalgo.structure.Transformation2D;
/**
* Basic image processing using the FFT.
* <p>
* Demonstrates how to transform an image to the frequency domain, modify it, and transform it back.
* <p>
* In addition this program shows how to create an image in the frequency domain.
*
* @see https://www.ojalgo.org/2023/12/image-processing-using-fft/
*/
public class ImageProcessingFFT {
// Base directory for both the input photo and all generated output images
static final File BASE_DIR = new File("/Users/apete/Developer/data/fft/");
/**
* Reducing (removing) the image's low-frequency components; a high-pass filter.
* <p>
* The distance is expressed in percentage of the radius of the largest circle that fits within the image
* – it'll be between 0.0 and 100.0. (Actually, on the diagonal of a square image it will go up to
* 141.42.) Try changing the filter's radius or the multiplier.
*/
static final Transformation2D<ComplexNumber> HIGH_PASS_FILTER = ImageData
.newTransformation((distance, element) -> distance <= 1.0 ? element.multiply(0.2) : element);
/**
* Reducing (removing) the image's high-frequency components; a low-pass filter.
*
* @see #HIGH_PASS_FILTER
*/
static final Transformation2D<ComplexNumber> LOW_PASS_FILTER = ImageData
.newTransformation((distance, element) -> distance >= 1.0 ? element.multiply(0.2) : element);
public static void main(final String[] args) throws IOException {
BasicLogger.debug();
BasicLogger.debug(ImageProcessingFFT.class);
BasicLogger.debug(OjAlgoUtils.getTitle());
BasicLogger.debug(OjAlgoUtils.getDate());
BasicLogger.debug();
/*
* Read any image file (supported by java.awt.image.BufferedImage) and convert it to a grey scale
* image. We assume it's a square image, and resample it to 1024x1024 pixels.
*/
int dim = 1024;
ImageData initialImage = ImageData.read(BASE_DIR, "pexels-any-lane-5946095.jpg").convertToGreyScale().resample(dim, dim);
/*
* Write the initial, grey scale, image to disk.
*/
initialImage.writeTo(BASE_DIR, "initial.png");
/*
* Perform the 2D discrete Fourier transform, and shift the frequencies so that the zero-frequency
* components are in the centre of the image.
*/
PhysicalStore<ComplexNumber> transformed = initialImage.toFrequencyDomain();
/*
* Create an image of the squared magnitudes of the complex numbers in the frequency domain. This is
* the power spectrum of the image.
*/
ImageData.ofPowerSpectrum(transformed).writeTo(BASE_DIR, "powers.png");
/*
* Apply a filter to the frequency domain representation of the image. Here we use a low-pass filter
* to remove the high-frequency components. You can swap the filter to a high-pass filter to remove
* the low-frequency components, or you can create your own filter.
*/
transformed.modifyAny(LOW_PASS_FILTER);
/*
* Write the processed image to disk.
*/
ImageData.fromFrequencyDomain(transformed).writeTo(BASE_DIR, "processed.png");
/*
* It is possible to construct an image in the frequency domain representation. Here we create an
* image by setting a couple of specific values. Try changing the values, the location of the values
* or adding more values.
*/
GenericStore<ComplexNumber> frequencyDomain = GenericStore.C128.make(dim, dim);
/*
* With dim = 1024, the theoretical centre of the image is at (511.5, 511.5). Set values close to the
* centre. Otherwise you won't see the pattern in the image.
*/
/*
* The forward FFT is not scaled, but the inverse is. So you need to scale the values you set here.
* That's why the values are so large.
*/
// NOTE(review): 128 * dim * dim and 255 * dim * dim are evaluated in int arithmetic;
// fine at dim = 1024, but they would overflow for dim >= 4096 - confirm if dim is ever raised.
frequencyDomain.set(512, 506, ComplexNumber.of(128 * dim * dim, 64 * dim * dim));
frequencyDomain.set(508, 514, ComplexNumber.makePolar(255 * dim * dim, PrimitiveMath.HALF_PI));
ImageData.fromFrequencyDomain(frequencyDomain).writeTo(BASE_DIR, "constructed.png");
}
}
import java.io.File;
import java.io.IOException;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.array.Array1D;
import org.ojalgo.data.image.ImageData;
import org.ojalgo.function.aggregator.Aggregator;
import org.ojalgo.matrix.decomposition.SingularValue;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.netio.ShardedFile;
/**
* Basic image processing using ojAlgo.
* <p>
* Demonstrates how to "compress" an image by removing non-essential information from the image.
*
* @see https://www.ojalgo.org/2023/10/image-processing-using-singular-value-decomposition/
*/
public class ImageProcessingSVD {

    /** Directory where the input image is read from and all output images are written. */
    private static final File BASE_DIR = new File("/Users/apete/Developer/data/compression/");

    /**
     * Reads an image, computes its singular value decomposition, and writes a series of
     * rank-limited reconstructions to disk - printing a table of each rank's singular value,
     * relative magnitude, aggregated "information" and storage fraction.
     *
     * @param args not used
     * @throws IOException if reading the input image or writing any output image fails
     */
    public static void main(final String[] args) throws IOException {

        BasicLogger.debug();
        BasicLogger.debug(ImageProcessingSVD.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        File inputFile = new File(BASE_DIR, "cityhall.png");
        /*
         * Read any image file (supported by java.awt.image.BufferedImage) and convert it to a grey scale
         * image.
         */
        ImageData initialImage = ImageData.read(inputFile).convertToGreyScale();
        /*
         * The length of the diagonal is also the number of singular values
         */
        int nbDiagonal = initialImage.getMinDim();
        int nbRows = initialImage.getRowDim();
        int nbCols = initialImage.getColDim();
        /*
         * 1 byte per pixel in the image. Widen to double BEFORE multiplying - a plain int product
         * 'nbRows * nbCols' is evaluated in 32-bit arithmetic and would silently overflow for very
         * large images.
         */
        double storageRequirement = (double) nbRows * nbCols;
        /*
         * Never mind "sharded". This is just a convenient way to keep track of multiple files with unique
         * numbers as part of the file names.
         */
        ShardedFile outputFiles = ShardedFile.of(BASE_DIR, "resampled.png", 1 + nbDiagonal);
        /*
         * Write the original, grey scale, image to disk.
         */
        File originalGreyScaleFile = outputFiles.single;
        initialImage.writeTo(originalGreyScaleFile);
        /*
         * Calculate the SVD
         */
        SingularValue<Double> decomposition = SingularValue.R064.decompose(initialImage);
        Array1D<Double> singularValues = decomposition.getSingularValues();
        double largestSingularValue = singularValues.doubleValue(0);
        double sumOfAllSingularValues = singularValues.aggregateAll(Aggregator.SUM);
        double conditionNumber = decomposition.getCondition();
        BasicLogger.debug("Largest singular value={}, Sum of all={}, Condition number={}", largestSingularValue, sumOfAllSingularValues, conditionNumber);
        BasicLogger.debug();
        BasicLogger.debug("Rank SingularValue Rel.Mag. Aggr.Info Storage");
        BasicLogger.debug("=================================================");
        int[] ranks = { 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000 };
        // Stop early if a candidate rank exceeds the number of singular values
        for (int r = 0, rank = 0; r < ranks.length && (rank = ranks[r]) <= nbDiagonal; r++) {
            /*
             * Here rank refers to the number of singular values (the number of components) used when
             * reconstructing the matrix.
             */
            /*
             * If the rank is N then this is the N:th largest singular value (zero-based index)
             */
            double singularValue = singularValues.doubleValue(rank - 1);
            /*
             * and this is the sum of the N first/largest singular values.
             */
            double sumOfSingularValues = singularValues.aggregateRange(0, rank, Aggregator.SUM);
            /*
             * The part of the SVD storage we don't need when reconstructing the matrix at this rank.
             * Widened to double before multiplying for the same overflow reason as above.
             */
            double noNeedToStore = (double) (nbRows - rank) * (nbCols - rank);
            BasicLogger.debugFormatted("%4d %14.2f %9.4f %9.2f%% %7.2f%%", rank, singularValue, singularValue / largestSingularValue,
                    100.0 * sumOfSingularValues / sumOfAllSingularValues, 100.0 * (storageRequirement - noNeedToStore) / storageRequirement);
            /*
             * Reconstruct the matrix using 'rank' number of components.
             */
            MatrixStore<Double> recreatedMatrix = decomposition.reconstruct(rank);
            /*
             * Copy that matrix to a new image
             */
            ImageData recreatedImage = ImageData.copy(recreatedMatrix);
            /*
             * Write that image to file (with the rank as part of the file name)
             */
            File outputFile = outputFiles.shard(rank);
            recreatedImage.writeTo(outputFile);
        }
    }
}
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.matrix.store.RawStore;
import org.ojalgo.matrix.task.iterative.ConjugateGradientSolver;
import org.ojalgo.matrix.task.iterative.GaussSeidelSolver;
import org.ojalgo.matrix.task.iterative.JacobiSolver;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.type.context.NumberContext;
/**
* Comparing different iterative solvers
*
* @see https://www.ojalgo.org/2022/05/iterative-solver-comparison/
* @see https://github.com/optimatika/ojAlgo/wiki/Iterative-Solver-Comparison
*/
public class IterativeSolverComparison {

    /**
     * Runs the same two small equation systems through Jacobi, Gauss-Seidel (plain, under-relaxed
     * and over-relaxed) and Conjugate Gradient solvers, logging each solver's iterations.
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(IterativeSolverComparison.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        BasicLogger.debug("Simple 3x3 example from linAlg34.pdf");

        RawStore body3x3 = RawStore.wrap(new double[][] { { 4, 2, 3 }, { 3, -5, 2 }, { -2, 3, 8 } });
        R064Store rhs3x3 = R064Store.FACTORY.column(new double[] { 8, -14, 27 });

        // In the example the Jacobi solver is shown to converge to a sufficiently
        // accurate solution within 50 iterations
        JacobiSolver jacobiSolver = new JacobiSolver();
        jacobiSolver.configurator().debug(BasicLogger.DEBUG).iterations(50).accuracy(NumberContext.of(6));

        BasicLogger.debug();
        BasicLogger.debug("Jacobi");
        jacobiSolver.solve(body3x3, rhs3x3);

        // Same problem and requirements/limits, but using a Gauss-Seidel solver
        GaussSeidelSolver gaussSeidelSolver = new GaussSeidelSolver();
        gaussSeidelSolver.configurator().debug(BasicLogger.DEBUG).iterations(50).accuracy(NumberContext.of(6));

        BasicLogger.debug();
        BasicLogger.debug("Gauss-Seidel");
        gaussSeidelSolver.solve(body3x3, rhs3x3);

        // The Gauss-Seidel solver again, but this time (under) relaxed by a factor 0.9
        gaussSeidelSolver.setRelaxationFactor(0.9);

        BasicLogger.debug();
        BasicLogger.debug("Gauss-Seidel (relaxed x 0.9)");
        gaussSeidelSolver.solve(body3x3, rhs3x3);

        BasicLogger.debug();
        BasicLogger.debug("4x4 example from Hestenes and Steifel's original paper from 1952");
        BasicLogger.debug("\tMethods of Conjugate Gradients for Solving Linear Systems");

        RawStore body4x4 = RawStore.wrap(new double[][] { { 1, 2, -1, 1 }, { 2, 5, 0, 2 }, { -1, 0, 6, 0 }, { 1, 2, 0, 3 } });
        R064Store rhs4x4 = R064Store.FACTORY.column(new double[] { 3, 9, 5, 6 });

        // Theoretically a conjugate gradient solver should be able to solve a 4x4
        // system within 4 iterations
        ConjugateGradientSolver cgSolver = new ConjugateGradientSolver();
        cgSolver.configurator().debug(BasicLogger.DEBUG).iterations(50).accuracy(NumberContext.of(6));

        BasicLogger.debug();
        BasicLogger.debug("Conjugate Gradient");
        cgSolver.solve(body4x4, rhs4x4);

        // The same problem using the Gauss-Seidel solver (over) relaxed by a factor 1.75
        gaussSeidelSolver.setRelaxationFactor(1.75);

        BasicLogger.debug();
        BasicLogger.debug("Gauss-Seidel (relaxed x 1.75)");
        gaussSeidelSolver.solve(body4x4, rhs4x4);
    }
}
import java.io.File;
import java.util.Comparator;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.concurrent.Parallelism;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.integer.IntegerSolver;
import org.ojalgo.optimisation.integer.IntegerStrategy;
import org.ojalgo.optimisation.integer.NodeKey;
import org.ojalgo.type.context.NumberContext;
/**
* An introduction to the very basics of MIP solver strategy configuration.
*
* @see https://www.ojalgo.org/2022/03/mip-strategy-configuration
*/
public class MIPStrategyConfiguration {

    /**
     * To run this program you need to update this path. This particular model can be found among ojAlgo's
     * test resources. Alternatively you could try some other MIP model.
     */
    private static final File FILE = new File("/Users/apete/Developer/optimatika_git/ojAlgo/src/test/resources/optimisation/miplib/gr4x6.mps");

    /**
     * A node priority definition: orders branch-and-bound nodes by the objective function value of their
     * (non-integer) parent node. Used below with {@code withPriorityDefinitions(...)}.
     */
    private static final Comparator<NodeKey> MIN_OBJECTIVE = Comparator.comparingDouble((final NodeKey k) -> k.objective).reversed();

    /**
     * Demonstrates three ways to run the same MIP model: with a custom-configured strategy, with the
     * defaults, and with just the MIP gap tolerance adjusted.
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(MIPStrategyConfiguration.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        ExpressionsBasedModel model1 = ExpressionsBasedModel.parse(FILE);

        /*
         * This is how you set the strategy for the MIP solver. In this case it's just set to the default
         * strategy, which is of course unnecessary to do - that's what it's set to if you do nothing.
         */
        model1.options.integer(IntegerStrategy.DEFAULT);

        /*
         * IntegerStrategy is an interface, and you're free to provide any implementation you like. One of the
         * key things it does is to act as a factory for the model/problem specific ModelStrategy. That's
         * where the more advanced configuration takes place. ModelStrategy is an abstract class that allows
         * you to implement or override any/all of its functionality.
         */

        /*
         * You do not need to re-implement everything from scratch just to change some aspect of the strategy.
         * The IntegerStrategy.DEFAULT instance is configurable.
         */
        IntegerStrategy configuredStrategy = IntegerStrategy.DEFAULT.withParallelism(Parallelism.FOUR).withPriorityDefinitions(MIN_OBJECTIVE);

        /*
         * Use 4 worker threads and always work on the subproblem with the minimum objective function value
         * (from the non-integer parent node).
         */
        model1.options.integer(configuredStrategy);

        /*
         * To see something about how the MIP solver progresses we can turn on progress logging.
         */
        model1.options.progress(IntegerSolver.class);

        /*
         * Then solve and print the results
         */
        BasicLogger.debug();
        BasicLogger.debug("First attempt with configured strategy");
        BasicLogger.debug("===============================================================================");
        BasicLogger.debug();
        BasicLogger.debug(model1.minimise());

        /*
         * Actually, before spending time configuring different strategies just try the default settings and
         * maybe learn something about the particular problem first.
         */
        BasicLogger.debug();
        BasicLogger.debug("Should perhaps have tried with the default settings first...");
        BasicLogger.debug("===============================================================================");
        BasicLogger.debug();

        ExpressionsBasedModel model2 = ExpressionsBasedModel.parse(FILE);
        model2.options.progress(IntegerSolver.class);
        BasicLogger.debug(model2.minimise());

        /*
         * From the progress logging output you can see how the objective function value improves with each
         * new integer solution found. You also see how the range of possible values - the MIP gap - shrinks.
         */
        /*
         * [197.2642857142857, 208.7] -> FEASIBLE 202.35 @ { 20.0, 1.27675647831893E-15, 25.0, 0.0, 0.0,,,
         */
        /*
         * [197.2642857142857, 208.7] is the relevant node's local MIP gap where 208.7 is the previously best
         * known integer solution, and 197.2642857142857 was the best value we could possibly hope for when
         * removing the integrality constraints. 202.35 is the value of the newly found integer solution.
         * Looks like it's possible to use a more aggressive MIP gap tolerance - we only need to consider 3
         * significant digits when comparing objective function values. (We won't miss any solutions by not
         * considering more significant digits.) This may translate to less work for the solver.
         */
        BasicLogger.debug();
        BasicLogger.debug("The default configuration with just a small adjustment to the MIP gap tolerance");
        BasicLogger.debug("===============================================================================");
        BasicLogger.debug();

        ExpressionsBasedModel model3 = ExpressionsBasedModel.parse(FILE);
        model3.options.integer(IntegerStrategy.DEFAULT.withGapTolerance(NumberContext.of(3)));
        model3.options.progress(IntegerSolver.class);
        BasicLogger.debug(model3.minimise());

        /*
         * This is a small and simple model. The various configuration changes did affect the solution time,
         * and number of nodes evaluated, but regardless it solved relatively quick. With a larger more
         * difficult model having the right configuration could be what makes it possible to get a solution
         * at all.
         */
    }

}
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.RELU;
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.SOFTMAX;
import static org.ojalgo.ann.ArtificialNeuralNetwork.Error.CROSS_ENTROPY;
import static org.ojalgo.ann.ArtificialNeuralNetwork.Error.HALF_SQUARED_DIFFERENCE;
import java.io.File;
import java.util.Arrays;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.ann.ArtificialNeuralNetwork;
import org.ojalgo.ann.NetworkBuilder;
import org.ojalgo.ann.NetworkInvoker;
import org.ojalgo.ann.NetworkTrainer;
import org.ojalgo.matrix.store.R032Store;
import org.ojalgo.matrix.store.RawStore;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.structure.Access1D;
/**
* With v48.3 the neural network code was refactored. Here's an outline of what's new or changed. (This is NOT
* a runnable program.)
*
* @see https://www.ojalgo.org/2020/09/neural-network-new-features-in-v48-3/
*/
public class NeuralNetworkNewsAndChanges_48_3 {

    /**
     * Walks through the v48.3 neural network API: per-layer building, trainers, invokers and file
     * persistence. This is an outline only - it is not meant to produce meaningful results when run.
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(NeuralNetworkNewsAndChanges_48_3.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        /*
         * Networks are now built using a per layer builder. At first you specify the network's number of
         * input nodes. Then you add layers, one at the time, specifying their number of output nodes and the
         * activator function. The output of one layer is naturally the input to the following. This
         * particular network has 4 input and 2 output nodes. Some would say this is a 3-layer network, but
         * there are only 2 calculation layers. The first calculation layer has 4 input and 6 output nodes.
         * The second, and final, layer has 6 input and 2 output nodes.
         */
        ArtificialNeuralNetwork network = ArtificialNeuralNetwork.builder(4).layer(6, RELU).layer(2, SOFTMAX).get();

        /*
         * Optionally it is possible to specify which matrix factory to use internally. This will allow
         * switching between double and float elements as well as different matrix representations.
         */
        NetworkBuilder builder = ArtificialNeuralNetwork.builder(R032Store.FACTORY, 4);

        /*
         * To train a network you obtain a trainer...
         */
        NetworkTrainer trainer = network.newTrainer();

        /*
         * That network trainer is ready to be used, but it can be reconfigured. The trainer can be configured
         * with a learning rate as well as optional use of, dropouts, L1 lasso regularisation and/or L2 ridge
         * regularisation.
         */
        trainer.rate(0.05).dropouts().lasso(0.01).ridge(0.001);

        /*
         * The input and output can be typed as ojAlgo's most basic data type – Access1D. Just about anything
         * in ojAlgo "is" an Access1D. If you have arrays or lists of numbers then you can wrap them in
         * Access1D instances to avoid copying. Most naturally you work with ojAlgo data structures from the
         * beginning.
         */
        Access1D<Double> input = Access1D.wrap(1, 2, 3, 4);
        Access1D<Double> output = Access1D.wrap(Arrays.asList(10.0, 20.0));

        /*
         * Repeatedly call this, with different examples, to train the neural network.
         */
        trainer.train(input, output);

        /*
         * To use/invoke a network you obtain an invoker... A key feature here is that you can have several
         * invoker instances using the same underlying network simultaneously. The invocation specific state
         * is in the invoker instance.
         */
        NetworkInvoker invoker1 = network.newInvoker();
        NetworkInvoker invoker2 = network.newInvoker();
        output = invoker1.invoke(input);

        /*
         * Trained networks can be saved to file, and then used later
         */
        File file = new File("somePath");
        network.writeTo(file);
        ArtificialNeuralNetwork network2 = ArtificialNeuralNetwork.from(file);

        /*
         * It's also possible to specify a (different) matrix factory when reading a network from file.
         */
        ArtificialNeuralNetwork network3 = ArtificialNeuralNetwork.from(RawStore.FACTORY, file);

        /*
         * What about specifying the error/loss function when training? ojAlgo supports 2 different error
         * functions, and which to use is dictated by the activator of the final layer. CROSS_ENTROPY has to
         * be used with the SOFTMAX activator, and cannot be used with any other. The correct error function
         * is set for you. You can manually set it, but if you set the incorrect one you'll get an exception.
         */
        trainer.error(CROSS_ENTROPY);
        trainer.error(HALF_SQUARED_DIFFERENCE);
    }

}
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.util.DoubleSummaryStatistics;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.concurrent.ProcessingService;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.netio.SegmentedFile;
import org.ojalgo.netio.SegmentedFile.Segment;
import org.ojalgo.netio.TextLineReader;
import org.ojalgo.type.CalendarDateUnit;
import org.ojalgo.type.Stopwatch;
import org.ojalgo.type.function.TwoStepMapper;
import org.ojalgo.type.keyvalue.EntryPair;
import org.ojalgo.type.keyvalue.EntryPair.KeyedPrimitive;
/**
* An example of how to use the SegmentedFile and ProcessingService classes to process a large file in
* parallel.
* <p>
* This is an implementation of the 1BRC using ojAlgo.
*
* @see https://github.com/gunnarmorling/1brc
* @see https://www.ojalgo.org/2024/02/1brc-using-ojalgo/
*/
public class OneBRC {

    /**
     * There will be one instance of this class per station (city). It is used to aggregate the temperature
     * measurements for that station. Most 1BRC submissions have a similar class. The
     * {@link DoubleSummaryStatistics} could have been used for this, but it has some overhead due to handling
     * of infinities and NaNs not needed here.
     */
    static final class Aggregator {

        /** Round to 1 decimal digit, matching the 1BRC output format. */
        static double round(final double value) {
            return Math.round(value * 10.0) / 10.0;
        }

        int count = 0;
        double max = Double.NEGATIVE_INFINITY;
        double min = Double.POSITIVE_INFINITY;
        double sum = 0.0;

        /** Formats as min/mean/max - the 1BRC result format for one station. */
        @Override
        public String toString() {
            return min + "/" + Aggregator.round(sum / count) + "/" + max;
        }

        /** Fold the state of another aggregator into this one. */
        void merge(final Aggregator other) {
            if (other.min < min) {
                min = other.min;
            }
            if (other.max > max) {
                max = other.max;
            }
            sum += other.sum;
            count += other.count;
        }

        /** Record a single temperature measurement. */
        void put(final double value) {
            if (value < min) {
                min = value;
            }
            if (value > max) {
                max = value;
            }
            sum += value;
            count++;
        }
    }

    /**
     * A file segment processor. This class implements much of the actual processing specific for this use
     * case. Since it implements the TwoStepMapper interface there is standard functionality implemented in
     * the ProcessingService class we can use. More precisely since it implements the
     * TwoStepMapper.Combineable sub-interface it can be used with the
     * ProcessingService.reduceCombineable(...) method, and we'll make use of that.
     */
    static final class Processor implements TwoStepMapper.Combineable<SegmentedFile.Segment, SortedMap<String, Aggregator>, Processor> {

        /**
         * The standard Java Double.parseDouble(...) method is not used since it is too slow. This method only
         * handles precisely the format used in the 1BRC data files (an optional '-', then digits with exactly
         * one decimal digit), and is therefore much faster.
         */
        private static double parseDouble(final String sequence, final int first, final int limit) {

            int unscaled = 0;
            boolean negative = false;

            for (int i = first; i < limit; i++) {
                char digit = sequence.charAt(i);
                switch (digit) {
                    case '-':
                        negative = true;
                        break;
                    case '.':
                        // The decimal point is implied by the fixed 1-decimal format; just skip it.
                        break;
                    default:
                        unscaled = 10 * unscaled + digit - '0';
                        break;
                }
            }

            if (negative) {
                return -unscaled / 10D;
            } else {
                return unscaled / 10D;
            }
        }

        /**
         * The actual temperature measurements. The key is the station (city) name, and the value is the
         * Aggregator instance for that station.
         */
        private final Map<String, Aggregator> myAggregates = new HashMap<>(512);
        /**
         * The SegmentedFile instance this Processor is processing data from.
         */
        private final SegmentedFile mySegmentedFile;

        Processor(final SegmentedFile segmentedFile) {
            super();
            mySegmentedFile = segmentedFile;
        }

        /**
         * Combine the internal state from another Processor instance into this one.
         */
        @Override
        public void combine(final Processor other) {
            for (Map.Entry<String, Aggregator> entry : other.getEntries()) {
                myAggregates.computeIfAbsent(entry.getKey(), k -> new Aggregator()).merge(entry.getValue());
            }
        }

        /**
         * Consume the data from one file segment – process all the lines in that file segment.
         */
        @Override
        public void consume(final Segment segment) {
            try (TextLineReader reader = mySegmentedFile.newTextLineReader(segment)) {
                reader.forEach(this::doOneLine);
            } catch (IOException cause) {
                throw new UncheckedIOException(cause);
            }
        }

        /**
         * Get the final sorted results
         */
        @Override
        public SortedMap<String, Aggregator> getResults() {
            return new TreeMap<>(myAggregates);
        }

        /**
         * Reset the internal state of this instance (to enable re-use).
         */
        @Override
        public void reset() {
            myAggregates.clear();
        }

        /**
         * Parse one line of data, and add the temperature measurement to the correct station aggregator.
         */
        private void doOneLine(final String line) {
            int split = line.indexOf(";");
            String station = line.substring(0, split);
            double temperature = Processor.parseDouble(line, split + 1, line.length());
            myAggregates.computeIfAbsent(station, key -> new Aggregator()).put(temperature);
        }

        Set<Entry<String, OneBRC.Aggregator>> getEntries() {
            return myAggregates.entrySet();
        }
    }

    static final File FILE = new File("/Users/apete/Developer/data/1BRC/measurements.txt");

    /**
     * A ProcessingService is a thin utility wrapper around an ExecutorService. It simplifies some processing.
     * In particular it makes use of a TwoStepMapper to process a collection of work items. It's somewhat like
     * a Collector (used with streams) but all in one interface.
     */
    static final ProcessingService PROCSERV = ProcessingService.INSTANCE;

    public static void main(final String[] args) throws IOException {

        BasicLogger.debug();
        BasicLogger.debug(OneBRC.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        SortedMap<String, OneBRC.Aggregator> results = null;

        /*
         * First we'll execute a re-implementation of the 1BRC baseline. The code is more or less the same as
         * the original, but it's using the Aggregator built for the ojAlgo version of the 1BRC as well as
         * KeyedPrimitive and EntryPair that are part of the ojAlgo library.
         */
        Stopwatch stopwatch = new Stopwatch();

        Collector<KeyedPrimitive<String>, ?, Map<String, OneBRC.Aggregator>> collector = Collectors.groupingBy(KeyedPrimitive::getKey,
                Collector.of(Aggregator::new, (aggr, pair) -> {
                    aggr.put(pair.doubleValue());
                }, (aggr1, aggr2) -> {
                    aggr1.merge(aggr2);
                    return aggr1;
                }));

        results = null;
        BasicLogger.debug();
        stopwatch.reset();
        // Files.lines(...) returns a Stream holding an open file handle - it must be closed
        // (try-with-resources) or the file descriptor leaks.
        try (Stream<String> lines = Files.lines(FILE.toPath())) {
            results = new TreeMap<>(lines.map(OneBRC::parseLine).collect(collector));
        }
        BasicLogger.debug(results);
        BasicLogger.debug("Baseline: {}", stopwatch.stop(CalendarDateUnit.SECOND));

        /*
         * Baseline parallelised – exactly the same code just using parallel streams instead.
         */
        results = null;
        BasicLogger.debug();
        stopwatch.reset();
        try (Stream<String> lines = Files.lines(FILE.toPath())) {
            results = new TreeMap<>(lines.parallel().map(OneBRC::parseLine).collect(collector));
        }
        BasicLogger.debug(results);
        BasicLogger.debug("Parallelised: {}", stopwatch.stop(CalendarDateUnit.SECOND));

        /*
         * Now the ojAlgo way.
         */
        results = null;
        BasicLogger.debug();
        stopwatch.reset();
        try (SegmentedFile segmented = SegmentedFile.of(FILE)) {
            results = PROCSERV.reduceCombineable(segmented.segments(), () -> new Processor(segmented));
            BasicLogger.debug(results);
        }
        /*
         * The most important difference is that the ojAlgo version uses SegmentedFile and a ProcessingService
         * rather than Java's standard Files.lines(...) and parallel streams. The ojAlgo version is also
         * parallelised, but it's done in a more controlled way. The ojAlgo version is also more memory
         * efficient and more scalable.
         */
        BasicLogger.debug("ojAlgo: {}", stopwatch.stop(CalendarDateUnit.SECOND));
    }

    /**
     * Used to re-implement the 1BRC baseline.
     */
    static KeyedPrimitive<String> parseLine(final String line) {
        String[] parts = line.split(";");
        return EntryPair.of(parts[0], Double.parseDouble(parts[1]));
    }

}
import java.io.File;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.Optimisation.Result;
import org.ojalgo.optimisation.service.ServiceIntegration;
/**
* A program that shows how to use Optimatika's Optimisation-as-a-Service.
*
* @see https://www.ojalgo.org/2022/10/optimisation-as-a-service/
*/
public class OptimisationAsAService {

    /**
     * Shows how to route optimisation problems to a remote service, and how to restrict which models are
     * sent there (solving small models locally).
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(OptimisationAsAService.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        /*
         * The service is essentially a collection of solvers running on a remote machine. Its front is an
         * implementation of the Optimisation.Solver interface that simply forwards the problem. To be able to
         * make use of this there needs to be an implementation of the Optimisation.Integration interface
         * specific for that forwarding solver. When we instantiate such an integration we need to provide
         * information about where the service is located.
         */
        ServiceIntegration serviceIntegration = ServiceIntegration.newInstance("http://test-service.optimatika.se");

        /*
         * ...and then we need to configure ExpressionsBasedModel to make use of it.
         */
        ExpressionsBasedModel.addIntegration(serviceIntegration);

        /*
         * That's it! Everything else is the same as it would be in any other case.
         */
        File file = new File("./testprob.mps"); // File containing an optimisation problem
        ExpressionsBasedModel model = ExpressionsBasedModel.parse(file);
        Result result = model.minimise(); // Solution to that problem
        BasicLogger.debug("Minimised => " + result);

        /*
         * The main purpose of having an optimisation service is that it's possible to offload heavy workload
         * to a separate machine running more/other/better solvers than the built-in pure Java ojAlgo solvers.
         */

        /*
         * In fact maybe you want to solve some smaller models locally, and only use the service for larger
         * models.
         */
        ExpressionsBasedModel.Integration<?> configuredIntegration = serviceIntegration.withCapabilityPredicate(m -> m.countVariables() > 1000);

        /*
         * With that a new integration is created, configured to only report that it's capable of solving a
         * particular model if there are more than 1000 variables. If it's not "capable" the default solvers
         * will be used instead.
         */

        /*
         * To use the configured service integration rather than the unconfigured one we added before, the
         * previously registered integrations need to be cleared first.
         */
        ExpressionsBasedModel.clearIntegrations();
        ExpressionsBasedModel.addIntegration(configuredIntegration);

        result = model.minimise(); // This time solved locally since that test problem is very small
        BasicLogger.debug("Minimised => " + result);
    }

}
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.*;
import static org.ojalgo.ann.ArtificialNeuralNetwork.Error.CROSS_ENTROPY;
import java.util.Collections;
import org.ojalgo.ann.ArtificialNeuralNetwork;
import org.ojalgo.ann.NetworkTrainer;
import org.ojalgo.structure.Access1D;
/**
* This is not a functioning program. It's just something that outlines the basic steps involved in using
* ojAlgo's artificial neural network functionality.
*
* @see https://www.ojalgo.org/2018/09/neural-network-basics/
*/
public class OutlineNeuralNetworkUsage {

    /**
     * Outlines the basic build/configure/train/invoke cycle of the (pre-v48.3) neural network API. Not a
     * functioning program - the sample input/output dimensions deliberately do not match the network.
     */
    public static void main(final String[] args) {

        /**
         * Start by instantiating a neural network builder/trainer. This particular network has 200 input and
         * 10 output nodes. The numbers 100 and 50 define the hidden layers. Some would say this is a 4-layer
         * network, but there are only 3 calculation layers. The first layer has 200 input and 100 output
         * nodes. The second layer has 100 input and 50 output nodes, and finally the third 50 inputs and 10
         * outputs.
         */
        NetworkTrainer builder = ArtificialNeuralNetwork.builder(200, 100, 50, 10);

        /**
         * That network builder/trainer is ready to be used, but it can be reconfigured. You can (re)define
         * which kind of activator function should be used for each of the calculation layers. You can
         * (re)define how to measure error/loss, and you can modify the learning rate.
         */
        builder.activators(SIGMOID, RELU, SOFTMAX).error(CROSS_ENTROPY).rate(0.1);

        /**
         * The input and output can be typed as ojAlgo's most basic data type – Access1D. Just about anything
         * in ojAlgo "is" an Access1D. If you have arrays or lists of numbers then you can wrap them in
         * Access1D instances to avoid copying. Most naturally you work with ojAlgo data structures from the
         * beginning.
         */
        Access1D<Double> input = Access1D.wrap(new double[] { 1, 2, 3 }); // To match the builder this should have 200 elements,
        Access1D<Double> output = Access1D.wrap(Collections.singletonList(4.0)); // and this should have 10.

        /**
         * Repeatedly call this to train the neural network.
         */
        builder.train(input, output);

        /**
         * When you're done training you get the finished product.
         */
        ArtificialNeuralNetwork network = builder.get();

        /**
         * You evaluate/use the trained neural network by invoking it (it's a function).
         */
        output = network.newInvoker().invoke(input);
    }

}
import static org.ojalgo.type.CalendarDateUnit.MILLIS;
import java.time.Instant;
import java.util.Random;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.array.ArrayR064;
import org.ojalgo.array.LongToNumberMap;
import org.ojalgo.array.SparseArray;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.matrix.store.SparseStore;
import org.ojalgo.matrix.task.iterative.ConjugateGradientSolver;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.series.BasicSeries;
import org.ojalgo.type.Stopwatch;
/**
* Example use of SparseStore and other special MatrixStore implementations.
*
* @see https://www.ojalgo.org/2020/09/sparse-and-special-structure-matrices/
* @see https://github.com/optimatika/ojAlgo/wiki/Sparse-Matrices
*/
public class SparseMatrices {

    /** Log message template: shared constant, therefore {@code static final}. */
    private static final String NON_ZEROS = "{} non-zeroes out of {} matrix elements calculated in {}";
    /** Shared random source for populating the sparse matrices. */
    private static final Random RANDOM = new Random();

    /**
     * Demonstrates sparse, identity and zero matrix stores, their multiplication behaviour, and logical
     * composition into larger structures. Would require hundreds of GB if the matrices were dense.
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(SparseMatrices.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        int dim = 100_000;

        SparseStore<Double> mtrxA = SparseStore.R064.make(dim, dim);
        SparseStore<Double> mtrxB = SparseStore.R064.make(dim, dim);
        SparseStore<Double> mtrxC = SparseStore.R064.make(dim, dim);
        MatrixStore<Double> mtrxZ = R064Store.FACTORY.makeZero(dim, dim);
        MatrixStore<Double> mtrxI = R064Store.FACTORY.makeIdentity(dim);

        // 5 matrices * 100k rows * 100k cols * 8 bytes per element => would be more than 372GB if dense
        // This program runs with default settings of any JVM

        for (int i = 0; i < dim; i++) {
            int j = RANDOM.nextInt(dim);
            double val = RANDOM.nextDouble();
            mtrxA.set(i, j, val);
        } // Each row of A contains 1 non-zero element at random column

        for (int j = 0; j < dim; j++) {
            int i = RANDOM.nextInt(dim);
            double val = RANDOM.nextDouble();
            mtrxB.set(i, j, val);
        } // Each column of B contains 1 non-zero element at random row

        Stopwatch stopwatch = new Stopwatch();

        BasicLogger.debug();
        BasicLogger.debug("Sparse-Sparse multiplication");
        stopwatch.reset();
        mtrxA.multiply(mtrxB, mtrxC);
        BasicLogger.debug(NON_ZEROS, mtrxC.nonzeros().estimateSize(), mtrxC.count(), stopwatch.stop(MILLIS));

        // It's the left matrix "this" that decides the multiplication algorithm,
        // and it knows nothing about the input/right matrix other than that it implements the required interface.
        // It could be another sparse matrix as in the example above. It could be a full/dense matrix. Or, it could be something else...
        // Let's try an identity matrix...

        BasicLogger.debug();
        BasicLogger.debug("Sparse-Identity multiplication");
        stopwatch.reset();
        mtrxA.multiply(mtrxI, mtrxC);
        BasicLogger.debug(NON_ZEROS, mtrxC.nonzeros().estimateSize(), mtrxC.count(), stopwatch.stop(MILLIS));

        // ...or an all zeros matrix...

        BasicLogger.debug();
        BasicLogger.debug("Sparse-Zero multiplication");
        stopwatch.reset();
        mtrxA.multiply(mtrxZ, mtrxC);
        BasicLogger.debug(NON_ZEROS, mtrxC.nonzeros().estimateSize(), mtrxC.count(), stopwatch.stop(MILLIS));

        // What if we turn things around so that the identity or zero matrices become "this" (the left matrix)?

        BasicLogger.debug();
        BasicLogger.debug("Identity-Sparse multiplication");
        stopwatch.reset();
        mtrxI.multiply(mtrxB, mtrxC);
        BasicLogger.debug(NON_ZEROS, mtrxC.nonzeros().estimateSize(), mtrxC.count(), stopwatch.stop(MILLIS));

        BasicLogger.debug();
        BasicLogger.debug("Zero-Sparse multiplication");
        stopwatch.reset();
        mtrxZ.multiply(mtrxB, mtrxC);
        BasicLogger.debug(NON_ZEROS, mtrxC.nonzeros().estimateSize(), mtrxC.count(), stopwatch.stop(MILLIS));

        // Q: Why create identity and zero matrices?
        // A: They can be used as building blocks for larger logical structures.

        MatrixStore<Double> mtrxL = mtrxI.right(mtrxA).below(mtrxZ, mtrxB);

        // There's much more you can do with that logical builder...

        BasicLogger.debug();
        BasicLogger.debug("Scale logical structure");
        stopwatch.reset();
        MatrixStore<Double> mtrxScaled = mtrxL.multiply(3.14);
        BasicLogger.debug("{} x {} matrix scaled in {}", mtrxScaled.countRows(), mtrxScaled.countColumns(), stopwatch.stop(MILLIS));

        // By now we would have passed 1TB, if dense

        // Other data structures that are sparse, include (shown for illustration, not used further):
        SparseArray<Double> sparseArray = SparseArray.factory(ArrayR064.FACTORY).make(dim);
        LongToNumberMap<Double> longToNumberMap = LongToNumberMap.factory(ArrayR064.FACTORY).make();
        BasicSeries<Instant, Double> series = BasicSeries.INSTANT.build(ArrayR064.FACTORY);
        ConjugateGradientSolver conjugateGradientSolver = new ConjugateGradientSolver();
    }

}
import static org.ojalgo.function.constant.PrimitiveMath.DIVIDE;
import java.util.Locale;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.data.DataProcessors;
import org.ojalgo.function.aggregator.Aggregator;
import org.ojalgo.matrix.decomposition.Eigenvalue;
import org.ojalgo.matrix.decomposition.SingularValue;
import org.ojalgo.matrix.store.MatrixStore;
import org.ojalgo.matrix.store.R064Store;
import org.ojalgo.matrix.store.RawStore;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.type.context.NumberContext;
/**
* PCA example using data from StatQuest's video "Principal Component Analysis (PCA), Step-by-Step".
*
* @see https://www.ojalgo.org/2019/03/statquest-pca-example/
*/
public class StatQuestPCA {

    /** Formatter for printing explained-variance proportions as percentages. */
    private static final NumberContext PERCENTAGE = NumberContext.getPercent(Locale.getDefault());

    /**
     * Performs PCA on a small data set twice - via the eigenvalue decomposition of the covariance matrix,
     * and via the singular value decomposition of the centered data - and shows that the two agree.
     */
    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(StatQuestPCA.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        MatrixStore<Double> data2 = RawStore.wrap(new double[][] { { 10, 11, 8, 3, 2, 1 }, { 6, 4, 5, 3, 2.8, 1 } }).transpose();
        MatrixStore<Double> data3 = RawStore.wrap(new double[][] { { 10, 11, 8, 3, 2, 1 }, { 6, 4, 5, 3, 2.8, 1 }, { 12, 9, 10, 2.5, 1.3, 2 } }).transpose();
        MatrixStore<Double> data4 = RawStore
                .wrap(new double[][] { { 10, 11, 8, 3, 2, 1 }, { 6, 4, 5, 3, 2.8, 1 }, { 12, 9, 10, 2.5, 1.3, 2 }, { 5, 7, 6, 2, 4, 7 } }).transpose();

        // Change between data2, data3 and data4 to get the different results
        R064Store data = R064Store.FACTORY.copy(data4);

        BasicLogger.debugMatrix("Data", data);

        long numberOfVariables = data.countColumns();
        long numberOfSamples = data.countRows();

        BasicLogger.debug("There are {} variables and {} samples.", numberOfVariables, numberOfSamples);

        /*
         * First we'll calculate the covariance and correlations matrices from the variable samples, and then
         * (using the covariance matrix) we'll calculate the EvD. We do this to verify the results of the SVD
         * method, and to show how the EvD and the SVD are related.
         */

        // First we create the covariance matrix. Forget about EvD, SVD, PCA...
        // This is simply the covariances of the sampled variables.
        R064Store covariances = DataProcessors.covariances(R064Store.FACTORY, data);

        BasicLogger.debug();
        BasicLogger.debugMatrix("Covariances", covariances);

        // Then calculate the EvD of that covariance matrix.
        Eigenvalue<Double> evd = Eigenvalue.R064.make(covariances);
        evd.decompose(covariances);

        BasicLogger.debug();
        BasicLogger.debug("EvD");
        BasicLogger.debugMatrix("Eigenvalues (on the diagonal)", evd.getD());
        BasicLogger.debugMatrix("Eigenvectors (in the columns)", evd.getV());

        double sumOfEigenvalues = evd.getD().aggregateDiagonal(Aggregator.SUM);

        BasicLogger.debug();
        BasicLogger.debug("Relative (Variance) Importance: {}", evd.getD().onAll(DIVIDE.by(sumOfEigenvalues)).sliceDiagonal());

        /*
         * Now let's start with the actual SVD
         */

        // Center the measurements for each of the variables
        data.modifyAny(DataProcessors.CENTER);

        BasicLogger.debug();
        BasicLogger.debugMatrix("Data (centered)", data);

        SingularValue<Double> svd = SingularValue.R064.make(data);
        svd.decompose(data);

        BasicLogger.debug();
        BasicLogger.debug("SVD");
        BasicLogger.debugMatrix("Left-singular Vectors (in the columns)", svd.getU());
        BasicLogger.debugMatrix("Singular values (on the diagonal)", svd.getD());
        BasicLogger.debugMatrix("Right-singular Vectors (in the columns) - compare these to the eigenvectors above", svd.getV());

        // Singular Value corresponding to PC1
        double pc1SV = svd.getD().doubleValue(0, 0);
        // Singular Value corresponding to PC2
        double pc2SV = svd.getD().doubleValue(1, 1);

        // variance = (singular value)^2 / degrees of freedom
        double pc1Variance = pc1SV * pc1SV / (numberOfSamples - 1);
        double pc2Variance = pc2SV * pc2SV / (numberOfSamples - 1);

        double sumOfSquaredSingularValues = svd.getD().aggregateDiagonal(Aggregator.SUM2);
        double sumOfVariances = sumOfSquaredSingularValues / (numberOfSamples - 1);

        // The sum of the eigenvalues (of the covariance matrix) should equal
        // the sum of the squared singular values (of the centered data)
        // divided by the degrees of freedom.
        BasicLogger.debug();
        BasicLogger.debug("Sum of eigenvalues/variance: {} == {}", sumOfEigenvalues, sumOfVariances);

        BasicLogger.debug();
        BasicLogger.debug("PC1: Variance={} ({}%) Loadings={}", pc1Variance, PERCENTAGE.format(pc1Variance / sumOfVariances), svd.getV().sliceColumn(0));
        BasicLogger.debug("PC2: Variance={} ({}%) Loadings={}", pc2Variance, PERCENTAGE.format(pc2Variance / sumOfVariances), svd.getV().sliceColumn(1));

        // The loadings describe how to create the principal components from the original
        // variables. Multiply the data matrix by each of the loadings vectors you want to use.
        // In this example we keep the 2 first, most important, principal components.
        // Then we get a matrix with 2 columns corresponding to PC1 and PC2.
        BasicLogger.debug();
        BasicLogger.debugMatrix("Data (transformed)", data.multiply(svd.getV().columns(0, 1)));

        // There is another way to get the same thing from the SVD.
        // The left-singular vectors scaled by their corresponding singular values.
        BasicLogger.debug();
        BasicLogger.debugMatrix("Transformed data (derived another way) - compare the 2 first columns with what we just calculated above",
                svd.getU().multiply(svd.getD()));

        // It's also possible to recreate the covariance matrix from the SVD
        BasicLogger.debug();
        BasicLogger.debugMatrix("Covariances (from SVD) – compare this to what we originally calculated", DataProcessors.covariances(R064Store.FACTORY, svd));

        // And when we construct the covariance matrix from the SVD,
        // we can optionally remove some noise
        BasicLogger.debugMatrix("Covariances (from SVD using only 2 components)", DataProcessors.covariances(R064Store.FACTORY, svd, 2));
    }

}
NAME TESTPROB
ROWS
N COST
L LIM1
G LIM2
E MYEQN
COLUMNS
XONE COST 1 LIM1 1
XONE LIM2 1
YTWO COST 4 LIM1 1
YTWO MYEQN -1
ZTHREE COST 9 LIM2 1
ZTHREE MYEQN 1
RHS
RHS1 LIM1 5 LIM2 10
RHS1 MYEQN 7
BOUNDS
UP BND1 XONE 4
LO BND1 YTWO -1
UP BND1 YTWO 1
ENDATA
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.optimisation.Expression;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.Optimisation;
import org.ojalgo.optimisation.Variable;
/**
* An example of how to implement a simple optimisation problem - The Diet Problem. It's one of the earliest
* real-life problems to be solved using linear programming.
*
* @see https://www.ojalgo.org/2019/05/the-diet-problem/
* @see https://github.com/optimatika/ojAlgo/wiki/The-Diet-Problem
*/
public class TheDietProblem {

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(TheDietProblem.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        // Build an empty optimisation model
        ExpressionsBasedModel dietModel = new ExpressionsBasedModel();

        // One variable per food type: how many servings to buy.
        // The bounds limit servings to [0,10]; the weight is the cost per
        // serving, which together form the (linear) objective function.
        Variable servingsOfBread = dietModel.newVariable("Bread").lower(0).upper(10).weight(0.05);
        Variable servingsOfCorn = dietModel.newVariable("Corn").lower(0).upper(10).weight(0.18);
        Variable servingsOfMilk = dietModel.newVariable("Milk").lower(0).upper(10).weight(0.23);

        // Nutritional requirement: total vitamin A must end up in [5000, 50000].
        // The coefficients state the vitamin A content of one serving of each food.
        Expression vitaminAConstraint = dietModel.newExpression("Vitamin A").lower(5000).upper(50000);
        vitaminAConstraint.set(servingsOfBread, 0).set(servingsOfCorn, 107).set(servingsOfMilk, 500);

        // Nutritional requirement: total calories must end up in [2000, 2250].
        Expression caloriesConstraint = dietModel.newExpression("Calories").lower(2000).upper(2250);
        caloriesConstraint.set(servingsOfBread, 65).set(servingsOfCorn, 72).set(servingsOfMilk, 121);

        // Minimise the total cost and print the continuous solution
        Optimisation.Result result = dietModel.minimise();

        BasicLogger.debug();
        BasicLogger.debug(result);
        BasicLogger.debug();

        // Require whole servings and solve again
        BasicLogger.debug("Adding integer constraints...");
        servingsOfBread.integer(true);
        servingsOfCorn.integer(true);
        servingsOfMilk.integer(true);

        result = dietModel.minimise();

        // Print the integer solution, and then the full model
        BasicLogger.debug();
        BasicLogger.debug(result);
        BasicLogger.debug();
        BasicLogger.debug(dietModel);
        BasicLogger.debug();
    }

}
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.optimisation.Expression;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.Optimisation.Result;
import org.ojalgo.optimisation.Variable;
/**
 * What's the largest number of nuggets you can order at McDonald's that they can't deliver (that exact quantity)?
*
* @see https://www.ojalgo.org/2019/05/the-mcnuggets-challenge/
* @see https://github.com/optimatika/ojAlgo/wiki/The-McNuggets-Challenge
*/
public class TheMcNuggetsChallenge {

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(TheMcNuggetsChallenge.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        ExpressionsBasedModel model = new ExpressionsBasedModel();

        // Non-negative integer counts of each available pack size
        Variable packs6 = model.newVariable("#6-packs").lower(0).integer(true);
        Variable packs9 = model.newVariable("#9-packs").lower(0).integer(true);
        Variable packs20 = model.newVariable("#20-packs").lower(0).integer(true);

        // Objective: maximise the total number of nuggets ordered
        Expression totalNuggetsOrdered = model.addExpression().weight(1);
        totalNuggetsOrdered.set(packs6, 6).set(packs9, 9).set(packs20, 20);

        // Count down from 100. For each target, cap the total at the target and
        // maximise: if the optimum falls short, that exact quantity cannot be
        // ordered and we have our answer.
        int target = 100;
        while (target > 0) {
            totalNuggetsOrdered.upper(target);
            Result result = model.maximise();
            if (Math.round(result.getValue()) >= target) {
                BasicLogger.debug(result);
            } else {
                BasicLogger.debug("Not possible to order {} nuggets", target);
                break;
            }
            target--;
        }
    }

}
import java.math.BigDecimal;
import java.time.Instant;
import java.time.LocalDate;
import java.util.OptionalDouble;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.machine.JavaType;
import org.ojalgo.machine.MemoryEstimator;
import org.ojalgo.netio.BasicLogger;
/**
* A tiny example demonstrating the use of the MemoryEstimator utility.
*
* @see https://www.ojalgo.org/2022/12/the-memory-estimator/
*/
public class TheMemoryEstimator {

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(TheMemoryEstimator.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        /*
         * Estimating the footprint of an array: state the component type and the
         * length, then call 'estimateArray'.
         */
        BasicLogger.debug();
        BasicLogger.debug("Size of byte[] of different lengths");
        BasicLogger.debug("===================================");
        for (int length = 0; length < 10; length++) {
            long bytes = MemoryEstimator.estimateArray(byte.class, length);
            BasicLogger.debug("byte[{}] == {} bytes", length, bytes);
        }

        /*
         * The JavaType enum covers Java's basic types, and reports both the
         * primitive size and the size when boxed in a wrapper class.
         */
        BasicLogger.debug();
        BasicLogger.debug("Memory footprint of Java's basic types");
        BasicLogger.debug("======================================");
        for (JavaType basicType : JavaType.values()) {
            BasicLogger.debug("{} {} bytes", basicType.getJavaClass().getSimpleName(), basicType.memory());
            BasicLogger.debug(1, "{} bytes when wrapped/boxed in a class", basicType.estimateSizeOfWrapperClass());
        }

        /*
         * Arbitrary Java objects can be estimated as well.
         */
        BasicLogger.debug();
        BasicLogger.debug("Memory footprint of some specific types");
        BasicLogger.debug("=======================================");
        BasicLogger.debug("BigDecimal == {} bytes", MemoryEstimator.estimateObject(BigDecimal.class));
        BasicLogger.debug("LocalDate == {} bytes", MemoryEstimator.estimateObject(LocalDate.class));
        BasicLogger.debug("Instant == {} bytes", MemoryEstimator.estimateObject(Instant.class));
        BasicLogger.debug("OptionalDouble == {} bytes", MemoryEstimator.estimateObject(OptionalDouble.class));

        /*
         * Only shallow sizes are reported: for complex types holding arrays,
         * collections or multiple references the estimate is meaningless (the way
         * MemoryEstimator works). Even the examples above show the limitation –
         * BigDecimal references a BigInteger, but the estimate only includes the
         * size of that reference, not the BigInteger instance itself.
         */
    }

}
import java.io.File;
import java.io.IOException;
import java.time.LocalDate;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.data.domain.finance.series.DataSource;
import org.ojalgo.data.domain.finance.series.FinanceDataReader;
import org.ojalgo.data.domain.finance.series.YahooParser;
import org.ojalgo.data.domain.finance.series.YahooParser.Data;
import org.ojalgo.matrix.store.R032Store;
import org.ojalgo.netio.ASCII;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.netio.TextLineWriter;
import org.ojalgo.netio.TextLineWriter.CSVLineBuilder;
import org.ojalgo.random.SampleSet;
import org.ojalgo.random.process.RandomProcess.SimulationResults;
import org.ojalgo.random.process.StationaryNormalProcess;
import org.ojalgo.random.scedasticity.GARCH;
import org.ojalgo.series.BasicSeries;
import org.ojalgo.series.primitive.CoordinatedSet;
import org.ojalgo.series.primitive.PrimitiveSeries;
import org.ojalgo.type.CalendarDateUnit;
import org.ojalgo.type.PrimitiveNumber;
import org.ojalgo.type.StandardType;
/**
* Example use of GARCH models, and in addition some general time series handling.
*
* @see https://www.ojalgo.org/2022/07/generalized-autoregressive-conditional-heteroscedasticity/
*/
public abstract class TimeSeriesGARCH {

    /**
     * A file containing historical data for the Nikkei 225 index, downloaded from Yahoo Finance.
     *
     * @see https://finance.yahoo.com/quote/^N225/history
     */
    static final File N225 = new File("/Users/apete/Developer/data/finance/^N225.csv");

    /**
     * Where to write output data for the chart.
     */
    static final File OUTPUT = new File("/Users/apete/Developer/data/finance/output.csv");

    /**
     * A file containing historical data for the Russell 2000 index, downloaded from Yahoo Finance.
     *
     * @see https://finance.yahoo.com/quote/^RUT/history
     */
    static final File RUT = new File("/Users/apete/Developer/data/finance/^RUT.csv");

    /**
     * Reads two Yahoo Finance CSV files, demonstrates series coordination and
     * correlation, then estimates a GARCH model for the N225 error terms, writes
     * the estimated volatility to {@link #OUTPUT}, and finally simulates future
     * scenarios from the fitted model.
     */
    public static void main(final String... args) {

        BasicLogger.debug();
        BasicLogger.debug(TimeSeriesGARCH.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        // Parse the CSV files and extract the price series
        FinanceDataReader<Data> readerN225 = FinanceDataReader.of(N225, YahooParser.INSTANCE);
        BasicSeries<LocalDate, PrimitiveNumber> seriesN225 = readerN225.getPriceSeries();
        BasicLogger.debug("N225: {}", seriesN225.toString());

        FinanceDataReader<Data> readerRUT = FinanceDataReader.of(RUT, YahooParser.INSTANCE);
        BasicSeries<LocalDate, PrimitiveNumber> seriesRUT = readerRUT.getPriceSeries();
        BasicLogger.debug("RUT: {}", seriesRUT.toString());
        BasicLogger.debug();

        /*
         * If you want to calculate something like correlations between a set of series they need to be
         * coordinated – the same start, finish and sampling frequency, as well as no missing values.
         */
        CoordinatedSet<LocalDate> coordinatedDaily = DataSource.coordinated(CalendarDateUnit.DAY).add(readerN225).add(readerRUT).get();
        BasicLogger.debug("Coordinated daily : {}", coordinatedDaily);

        /*
         * We have 1 value per day, but also know that we're missing data for weekends, holidays and such
         * (which need to be filled-in). Could be a good idea to instead coordinate weekly data.
         */
        CoordinatedSet<LocalDate> coordinatedWeekly = DataSource.coordinated(CalendarDateUnit.WEEK).add(readerN225).add(readerRUT).get();
        BasicLogger.debug("Coordinated weekly : {}", coordinatedWeekly);

        /*
         * Note that we don't feed the coordinator series, but something that can provide series. In this case
         * it's file readers/parsers, but it could just as well be something that calls web services, queries
         * a database or whatever. A coordinated data source will populate itself (lazily) on request, and
         * then clean up / reset itself using a timer.
         */
        CoordinatedSet<LocalDate> coordinatedMonthly = DataSource.coordinated(CalendarDateUnit.MONTH).add(readerN225).add(readerRUT).get();
        BasicLogger.debug("Coordinated monthly : {}", coordinatedMonthly);

        CoordinatedSet<LocalDate> coordinatedAnnually = DataSource.coordinated(CalendarDateUnit.YEAR).add(readerN225).add(readerRUT).get();
        BasicLogger.debug("Coordinated annually: {}", coordinatedAnnually);
        BasicLogger.debug();

        /*
         * Once we have a CoordinatedSet we can easily calculate the correlation coefficients.
         * Correlations are computed on the log-differences (log returns), not the raw prices.
         */
        R032Store correlations = coordinatedWeekly.log().differences().getCorrelations(R032Store.FACTORY);
        BasicLogger.debugMatrix("Correlations", correlations);

        /*
         * That was a bit of a detour. Now let's focus on just one series, and the volatility of that index.
         * Resampling to the Friday of each week gives one value per week.
         */
        PrimitiveSeries logDifferences = seriesN225.resample(DataSource.FRIDAY_OF_WEEK).asPrimitive().log().differences();
        SampleSet logDifferenceStatistics = SampleSet.wrap(logDifferences);
        BasicLogger.debug("Log difference statistics: {}", logDifferenceStatistics.toString());

        // The error terms are the log-differences with the mean subtracted (mean 0.0)
        PrimitiveSeries errorTerms = logDifferences.subtract(logDifferenceStatistics.getMean());
        SampleSet errorTermStatistics = SampleSet.wrap(errorTerms);
        BasicLogger.debug("Error term statistics: {}", errorTermStatistics.toString());

        /*
         * If we assume homoscedasticity then the (weekly) variance of the N225 time series is simply the
         * variance of the error term sample set. Note also that the variance of the log differences series
         * and the error term differences are the same.
         */
        BasicLogger.debug();
        BasicLogger.debug("Variance");
        BasicLogger.debug(1, "of log differences: {}", logDifferenceStatistics.getVariance());
        BasicLogger.debug(1, "of error terms : {}", errorTermStatistics.getVariance());

        /*
         * Any/all financial markets experience periods of turbulence with higher volatility than otherwise.
         * To model that we cannot simply have 1 fixed number to describe the volatility of a time series
         * spanning almost 60 years. We need something else.
         */
        GARCH model = GARCH.estimate(errorTerms, 2, 3);
        /*
         * GARCH model with 2 lagged variance values and 3 squared error terms
         */

        try (TextLineWriter writer = TextLineWriter.of(OUTPUT)) {

            // CSV line builder - to help write CSV files (here tab-separated, ASCII.HT)
            CSVLineBuilder lineBuilder = writer.newCSVLineBuilder(ASCII.HT);

            lineBuilder.append("Day").append("Error Term").append("Standard Deviation").write();

            /*
             * Looping through the error term series. For each item update the GARCH model and write the error
             * term and the estimated standard deviation to file. The contents of that file is then used to
             * generate the chart in the blog post. Note the order: the standard deviation is read BEFORE the
             * model is updated with the actual value, so each row holds the model's prediction for that step.
             */
            for (int i = 0; i < errorTerms.size(); i++) {
                double standardDeviation = model.getStandardDeviation(); // estimated std dev
                double value = errorTerms.value(i); // actual value/error (the mean is 0.0)
                lineBuilder.append(i).append(value).append(standardDeviation).write();
                model.update(value); // update model with actual value
            }

        } catch (IOException cause) {
            // Wrap as unchecked – this example has no meaningful recovery path
            throw new RuntimeException(cause);
        }

        /*
         * Presumably the main benefit of using GARCH models is to get a better estimate of current (near
         * future) volatility.
         */
        /*
         * We used weekly values. To annualize volatility it needs to be scaled by sqrt(52) –
         * the square root of the number of periods per year.
         */
        double annualizer = Math.sqrt(52);

        BasicLogger.debug();
        BasicLogger.debug("Volatility (annualized)");
        BasicLogger.debug(1, "constant (homoscedasticity): {}", StandardType.PERCENT.format(annualizer * errorTermStatistics.getStandardDeviation()));
        BasicLogger.debug(1, "of GARCH model (current/latest value): {}", StandardType.PERCENT.format(annualizer * model.getStandardDeviation()));

        /*
         * We can also use the GARCH model to simulate future scenarios.
         */
        int numberOfScenarios = 100;
        int numberOfProcessSteps = 52;
        double processStepSize = 1.0;

        /*
         * This will simulate 100 scenarios, stepping/incrementing the process 52 times, 1 week at the time.
         */
        // NOTE(review): the simulation results are created only to demonstrate the API –
        // nothing after this line uses them.
        SimulationResults simulationResults = StationaryNormalProcess.of(model).simulate(numberOfScenarios, numberOfProcessSteps, processStepSize);
    }

}
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.SIGMOID;
import static org.ojalgo.ann.ArtificialNeuralNetwork.Activator.SOFTMAX;
import static org.ojalgo.function.constant.PrimitiveMath.DIVIDE;
import java.io.File;
import java.io.IOException;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.ann.ArtificialNeuralNetwork;
import org.ojalgo.ann.NetworkInvoker;
import org.ojalgo.ann.NetworkTrainer;
import org.ojalgo.array.ArrayAnyD;
import org.ojalgo.data.DataBatch;
import org.ojalgo.data.image.ImageData;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.netio.IDX;
import org.ojalgo.netio.ToFileWriter;
import org.ojalgo.structure.AccessAnyD.MatrixView;
import org.ojalgo.type.format.NumberStyle;
/**
 * An example of how to build, train and use artificial neural networks with ojAlgo, using the MNIST database.
* This is an updated version of a previous example.
*
* @see https://www.ojalgo.org/2021/08/artificial-neural-network-example-v2/
* @see https://www.ojalgo.org/2018/09/introducing-artificial-neural-networks-with-ojalgo/
* @see https://github.com/optimatika/ojAlgo/wiki/Artificial-Neural-Networks
*/
public class TrainingANN {

    // Directories where generated PNG:s of the test/training images are written
    static final File OUTPUT_TEST_IMAGES = new File("/Users/apete/Developer/data/images/test/");
    static final File OUTPUT_TRAINING_IMAGES = new File("/Users/apete/Developer/data/images/training/");

    // The MNIST data files in IDX format (images and their labels)
    static final File TEST_IMAGES = new File("/Users/apete/Developer/data/t10k-images-idx3-ubyte");
    static final File TEST_LABELS = new File("/Users/apete/Developer/data/t10k-labels-idx1-ubyte");
    static final File TRAINING_IMAGES = new File("/Users/apete/Developer/data/train-images-idx3-ubyte");
    static final File TRAINING_LABELS = new File("/Users/apete/Developer/data/train-labels-idx1-ubyte");

    /**
     * Builds a 28*28 -> 200 (sigmoid) -> 10 (softmax) network, trains it on the MNIST
     * training set in mini-batches for a number of epochs, then evaluates it on the
     * test set and prints the error rate.
     *
     * @throws IOException if the IDX data files cannot be read, or generated images
     *         cannot be written
     */
    public static void main(final String[] args) throws IOException {

        BasicLogger.debug();
        BasicLogger.debug(TrainingANN.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        // Training configuration
        int trainingEpochs = 50;
        int batchSize = 100;
        int numberToPrint = 10; // how many test images to print to the console
        boolean generateImages = false; // set true to also write PNG:s of the images

        // Input layer matches the 28x28 pixel images; output layer has one unit per digit [0,9]
        ArtificialNeuralNetwork network = ArtificialNeuralNetwork.builder(28 * 28).layer(200, SIGMOID).layer(10, SOFTMAX).get();

        // Trainer with learning rate 0.01 and dropouts enabled
        NetworkTrainer trainer = network.newTrainer(batchSize).rate(0.01).dropouts();

        ArrayAnyD<Double> trainingLabels = IDX.parse(TRAINING_LABELS);
        ArrayAnyD<Double> trainingImages = IDX.parse(TRAINING_IMAGES);
        trainingImages.modifyAll(DIVIDE.by(255)); // Normalise the image pixel values that are between 0 and 255

        // Reusable batches – rows are added until full, then the trainer consumes them
        DataBatch inputBatch = trainer.newInputBatch();
        DataBatch outputBatch = trainer.newOutputBatch();

        for (int e = 0; e < trainingEpochs; e++) {
            // Iterate the image collection matrix by matrix (one matrix per image)
            for (MatrixView<Double> imageData : trainingImages.matrices()) {

                inputBatch.addRow(imageData);

                long imageIndex = imageData.index();
                int label = trainingLabels.intValue(imageIndex);
                // The label is an integer [0,9] representing the digit in the image
                // That label is used as the index to set a single 1.0 (one-hot output row)
                outputBatch.addRowWithSingleUnit(label);

                // Train on a full batch, then clear both batches for reuse
                if (inputBatch.isFull()) {
                    trainer.train(inputBatch, outputBatch);
                    inputBatch.reset();
                    outputBatch.reset();
                }

                // Optionally dump the training images (first epoch only, to avoid duplicates)
                if (generateImages && e == 0) {
                    TrainingANN.generateImage(imageData, label, OUTPUT_TRAINING_IMAGES);
                }
            }
        }

        /*
         * It is of course possible to invoke/evaluate the network using batched input data. Furthermore it
         * is possible to have multiple invokers running in different threads. Here we stick to 1 thread and
         * simple batch size == 1.
         */
        NetworkInvoker invoker = network.newInvoker();

        ArrayAnyD<Double> testLabels = IDX.parse(TEST_LABELS);
        ArrayAnyD<Double> testImages = IDX.parse(TEST_IMAGES);
        testImages.modifyAll(DIVIDE.by(255));

        int right = 0;
        int wrong = 0;

        for (MatrixView<Double> imageData : testImages.matrices()) {

            long expected = testLabels.longValue(imageData.index());
            // The predicted digit is the index of the largest (softmax) output
            long actual = invoker.invoke(imageData).indexOfLargest();

            if (actual == expected) {
                right++;
            } else {
                wrong++;
            }

            if (imageData.index() < numberToPrint) {
                BasicLogger.debug("");
                BasicLogger.debug("Image {}: {} <=> {}", imageData.index(), expected, actual);
                IDX.print(imageData, BasicLogger.DEBUG);
            }

            if (generateImages) {
                TrainingANN.generateImage(imageData, expected, OUTPUT_TEST_IMAGES);
            }
        }

        BasicLogger.debug("");
        BasicLogger.debug("=========================================================");
        BasicLogger.debug("Error rate: {}", (double) wrong / (double) (right + wrong));
    }

    /**
     * Writes one (normalised, [0,1]-scaled) image to {@code directory} as a greyscale
     * PNG, named by the image index padded for up to 60,000 images plus its label.
     *
     * @param imageData one image as a matrix view (values previously scaled to [0,1])
     * @param imageLabel the digit [0,9] the image represents; becomes part of the file name
     * @param directory target directory; created if missing
     * @throws IOException if the directory cannot be created or the PNG cannot be written
     */
    private static void generateImage(final MatrixView<Double> imageData, final long imageLabel, final File directory) throws IOException {

        ToFileWriter.mkdirs(directory);

        int nbRows = imageData.getRowDim();
        int nbCols = imageData.getColDim();

        // IDX-files and ojAlgo data structures are indexed differently.
        // That doesn't matter when we're doing the math,
        // but need to transpose the data when creating an image to look at.
        ImageData image = ImageData.newGreyScale(nbCols, nbRows);

        for (int i = 0; i < nbRows; i++) {
            for (int j = 0; j < nbCols; j++) {
                // The colours are stored inverted in the IDX-files (255 means "ink"
                // and 0 means "no ink". In computer graphics 255 usually means "white"
                // and 0 "black".) In addition the image data was previously scaled
                // to be in the range [0,1]. That's why...
                double grey = 255.0 * (1.0 - imageData.doubleValue(i, j));
                image.set(j, i, grey); // note the transposed indices (j, i)
            }
        }

        String name = NumberStyle.toUniformString(imageData.index(), 60_000) + "_" + imageLabel + ".png";
        File outputfile = new File(directory, name);

        image.writeTo(outputfile);
    }

}
import java.io.File;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.optimisation.Expression;
import org.ojalgo.optimisation.ExpressionsBasedModel;
import org.ojalgo.optimisation.Optimisation.Result;
import org.ojalgo.optimisation.Variable;
/**
* A program that shows how to use an MPS file with ojAlgo
*
* @see https://www.ojalgo.org/2022/05/optimisation-model-file-formats/
* @see https://github.com/optimatika/ojAlgo/wiki/Using-MPS
*/
public class UsingMPS {

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(UsingMPS.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        // Locate the MPS-file on disk
        File mpsFile = new File("./testprob.mps");

        // Parse the MPS-file into a model
        ExpressionsBasedModel parsedModel = ExpressionsBasedModel.parse(mpsFile);

        // Validation is optional, but catches problems early
        if (parsedModel.validate()) {
            BasicLogger.debug("MPS-model ok!");
        } else {
            BasicLogger.debug("MPS-model problem!");
        }
        BasicLogger.debug(parsedModel);

        // The MPS format has no standard way to state whether the problem should be
        // minimised or maximised. By convention some files include an OBJSENSE
        // section stating this, but it is not required. Here we simply solve in
        // both directions.
        Result minimiseResult = parsedModel.minimise();
        BasicLogger.debug("Minimised => " + minimiseResult);

        // Solution variable values come back in the order the columns/variables
        // were defined in the MPS-file.
        Result maximiseResult = parsedModel.maximise();
        BasicLogger.debug("Maximised => " + maximiseResult);

        /*
         * A parsed model is an ordinary ExpressionsBasedModel – just as if it had
         * been created programmatically – and can be inspected and modified further.
         */
        BasicLogger.debug();
        BasicLogger.debug("=== Variables ===");
        for (Variable variable : parsedModel.getVariables()) {
            BasicLogger.debug(variable);
        }
        BasicLogger.debug();
        BasicLogger.debug("=== Expressions ===");
        for (Expression expression : parsedModel.getExpressions()) {
            BasicLogger.debug(expression);
        }
        BasicLogger.debug();

        // Relax the two inequalities to allow a slightly wider range
        parsedModel.getExpression("LIM1").upper(5.5); // Change from 5.0 to 5.5
        parsedModel.getExpression("LIM2").lower(9.5); // Change from 10.0 to 9.5
        BasicLogger.debug("Modified => " + parsedModel.minimise());

        // The relaxed solution is no longer integer valued (the original happened
        // to be), so add that requirement to every variable...
        for (Variable variable : parsedModel.getVariables()) {
            variable.integer(true);
        }

        // ...which brings back the old solution
        BasicLogger.debug("Integer constrained => " + parsedModel.minimise());

        File modifiedModel = new File("/Users/apete/Developer/data/testprob.ebm");

        /*
         * The model can also be written to file, but then the EBM file format is used.
         */
        parsedModel.writeTo(modifiedModel);

        /*
         * An EBM file is read back the same way the MPS file was read before.
         */
        parsedModel = ExpressionsBasedModel.parse(modifiedModel);

        BasicLogger.debug();
        BasicLogger.debug("Modified model");
        BasicLogger.debug(parsedModel);
    }

}
import java.io.File;
import java.math.BigDecimal;
import java.util.List;
import org.ojalgo.OjAlgoUtils;
import org.ojalgo.array.*;
import org.ojalgo.netio.BasicLogger;
import org.ojalgo.random.Cauchy;
import org.ojalgo.random.Normal;
import org.ojalgo.random.Uniform;
import org.ojalgo.scalar.ComplexNumber;
import org.ojalgo.scalar.Quadruple;
import org.ojalgo.scalar.Quaternion;
import org.ojalgo.scalar.RationalNumber;
import org.ojalgo.structure.AccessAnyD.MatrixView;
import org.ojalgo.structure.AccessAnyD.VectorView;
import org.ojalgo.structure.ElementView1D;
import org.ojalgo.type.math.MathType;
/**
* Demonstrate some basic array functionality.
*
* @see https://www.ojalgo.org/2021/08/working-with-arrays/
* @see https://github.com/optimatika/ojAlgo/wiki/Working-with-arrays
*/
/*
 * Fixes: typos in user-facing log messages ("supported ny ojAlgo" -> "supported by
 * ojAlgo", "shape.lengtgh" -> "shape.length") and in several comments. All other
 * code is unchanged.
 */
public class WorkingWithArrays {

    public static void main(final String[] args) {

        BasicLogger.debug();
        BasicLogger.debug(WorkingWithArrays.class);
        BasicLogger.debug(OjAlgoUtils.getTitle());
        BasicLogger.debug(OjAlgoUtils.getDate());
        BasicLogger.debug();

        /*
         * The top level classes in org.ojalgo.array are:
         */
        Array1D<BigDecimal> array1D = Array1D.R256.make(12);
        Array2D<ComplexNumber> array2D = Array2D.C128.newDenseBuilder(12, 8);
        ArrayAnyD<Double> arrayAnyD = ArrayAnyD.R064.newSparseBuilder(12, 8, 4);

        /*
         * They represent 1-, 2- and N-dimensional arrays. They are generic, so they could hold many different
         * element/component types, but the generic type argument doesn't always describe exactly what the
         * element type is. For instance "Double" does NOT only represent 'double' but any/all primitive types
         * (double, float, long, int, short and byte). There is an enum outlining which number types ojAlgo
         * supports.
         */
        BasicLogger.debug("Element types supported by ojAlgo");
        BasicLogger.debug("=================================");
        for (MathType mathType : MathType.values()) {
            BasicLogger.debug("{} := Math number set {} implemented as {} x {}", mathType, mathType.getNumberSet(), mathType.getComponents(),
                    mathType.getJavaType());
        }
        BasicLogger.debug();

        /*
         * There's a couple of things to take note of regarding those 3 top level classes: (1) Every single
         * method implemented is declared in some interface elsewhere – typically in the org.ojalgo.structure
         * package – and those same interfaces are widely used throughout ojAlgo. (2) If you look at the
         * source code implementations you'll find that essentially nothing actually happens in these classes.
         * Instead just about everything is delegated to lower level classes. Those lower level
         * implementations can in turn be a number of different things, but in the end there has to be a
         * plain/dense array like float[], double[] or Number[]. There is a wide range of such plain/dense
         * array implementations in ojAlgo.
         */

        /*
         * Plain Java array based
         */
        DenseArray.Factory<Double> plainByteArrayFactory = ArrayZ008.FACTORY; // 8-bit Integer (byte)
        DenseArray.Factory<Double> plainShortArrayFactory = ArrayZ016.FACTORY; // 16-bit Integer (short)
        DenseArray.Factory<Double> plainIntArrayFactory = ArrayZ032.FACTORY; // 32-bit Integer (int)
        DenseArray.Factory<Double> plainLongArrayFactory = ArrayZ064.FACTORY; // 64-bit Integer (long)
        DenseArray.Factory<Double> plainFloatArrayFactory = ArrayR032.FACTORY; // 32-bit Real (float)
        DenseArray.Factory<Double> plainDoubleArrayFactory = ArrayR064.FACTORY; // 64-bit Real (double)
        DenseArray.Factory<Quadruple> plainQuadrupleArrayFactory = ArrayR128.FACTORY; // 128-bit Real (Quadruple)
        DenseArray.Factory<BigDecimal> plainBigDecimalArrayFactory = ArrayR256.FACTORY; // 256-bit Real (BigDecimal)
        DenseArray.Factory<RationalNumber> plainRationalNumberArrayFactory = ArrayQ128.FACTORY; // 128-bit Rational Number (2 x long)
        DenseArray.Factory<ComplexNumber> plainComplexNumberArrayFactory = ArrayC128.FACTORY; // 128-bit Complex Number (2 x double)
        DenseArray.Factory<Quaternion> plainQuaternionArrayFactory = ArrayH256.FACTORY; // 256-bit Quaternion (4 x double)

        /*
         * Buffer based "arrays"
         */
        DenseArray.Factory<Double> bufferByteArrayFactory = BufferArray.Z008;
        DenseArray.Factory<Double> bufferShortArrayFactory = BufferArray.Z016;
        DenseArray.Factory<Double> bufferIntArrayFactory = BufferArray.Z032;
        DenseArray.Factory<Double> bufferLongArrayFactory = BufferArray.Z064;
        DenseArray.Factory<Double> bufferFloatArrayFactory = BufferArray.R032;
        DenseArray.Factory<Double> bufferDoubleArrayFactory = BufferArray.R064;

        /*
         * Using Unsafe there are also off-heap "arrays"
         */
        DenseArray.Factory<Double> offHeapByteArrayFactory = OffHeapArray.Z008;
        DenseArray.Factory<Double> offHeapShortArrayFactory = OffHeapArray.Z016;
        DenseArray.Factory<Double> offHeapIntArrayFactory = OffHeapArray.Z032;
        DenseArray.Factory<Double> offHeapLongArrayFactory = OffHeapArray.Z064;
        DenseArray.Factory<Double> offHeapFloatArrayFactory = OffHeapArray.R032;
        DenseArray.Factory<Double> offHeapDoubleArrayFactory = OffHeapArray.R064;

        /*
         * Those factories dictate both the element type and where/how they are stored. The factories can be
         * used directly to instantiate plain/dense arrays,
         */
        DenseArray<BigDecimal> plainBigDecimalArray = plainBigDecimalArrayFactory.make(512);
        DenseArray<ComplexNumber> plainComplexNumberArray = plainComplexNumberArrayFactory.makeFilled(512, Normal.standard());
        DenseArray<Double> bufferFloatArray = bufferFloatArrayFactory.makeFilled(512, Uniform.standard());
        DenseArray<Double> offHeapDoubleArray = offHeapDoubleArrayFactory.makeFilled(512, Cauchy.standard());
        DenseArray<Double> plainFloatArray = plainFloatArrayFactory.copy(plainBigDecimalArray);
        DenseArray<Double> plainDoubleArray = plainDoubleArrayFactory.copy(plainComplexNumberArray);
        DenseArray<Quaternion> plainQuaternionArray = plainQuaternionArrayFactory.copy(bufferFloatArray);
        DenseArray<RationalNumber> plainRationalNumberArray = plainRationalNumberArrayFactory.copy(offHeapDoubleArray);

        /*
         * or they can be used as input to creating higher level factories (factories that create higher level
         * data structures).
         */
        array1D = Array1D.factory(plainBigDecimalArrayFactory).make(100);
        array2D = Array2D.factory(plainComplexNumberArrayFactory).make(10, 10);
        arrayAnyD = ArrayAnyD.factory(offHeapDoubleArrayFactory).make(10, 100, 1000, 3);
        SparseArray<Double> sparse = SparseArray.factory(plainFloatArrayFactory).make(1000);
        List<Double> list = NumberList.factory(bufferDoubleArrayFactory).make();
        LongToNumberMap<Double> map = LongToNumberMap.factory(offHeapDoubleArrayFactory).make();

        /*
         * If you implement your own dense array factory, then just plug it in and get access to all higher
         * level functionality.
         */

        /*
         * For the buffer based array classes there is also an option to create "arrays" backed by memory
         * mapped files:
         */
        BufferArray.MappedFileFactory fileBackedArrayFactory = BufferArray.R032.newMapped(new File("somePath"));

        /*
         * Regarding Any-D Arrays
         */
        long[] shape = { 2, 3, 1, 4 };
        arrayAnyD = ArrayAnyD.factory(plainDoubleArrayFactory).make(shape);

        /*
         * The rank of an Any-D array is the length of the shape used to instantiate it.
         */
        BasicLogger.debug("shape.length {} = rank() {}", shape.length, arrayAnyD.rank());
        BasicLogger.debug("Original shape: {}", arrayAnyD.shape());

        ArrayAnyD<Double> squeezed = arrayAnyD.squeeze();
        /*
         * squeeze() removes all indices/dimensions of range 1.
         */
        BasicLogger.debug("Squeezed shape: {}", squeezed.shape());

        ArrayAnyD<Double> reshaped = arrayAnyD.reshape(3, 2, 4);
        /*
         * reshape(...) changes the indexing but must maintain the same total number of elements.
         */
        BasicLogger.debug("Reshaped shape: {}", reshaped.shape());

        /*
         * Now we have 3 ArrayAnyD instances, with different shapes, all backed by the same lower level
         * plain/dense array.
         */
        BasicLogger.debug("3 different shapes: {}, {} and {}", arrayAnyD.shape(), squeezed.shape(), reshaped.shape());

        /*
         * Let's fill that squeezed instance with some numbers
         */
        squeezed.loopAllReferences(ref -> squeezed.set(ref, 1D + ref[2]));

        /*
         * Now using the reshaped instance we'll check the contents. Apart from random access contents can be
         * iterated element-wise, vector-wise or matrix-wise. In this case there are 24 elements, 8 vectors
         * (each of length 3) and 4 matrices (each of 2 column vectors of length 3 – 3 rows and 2 columns).
         */
        BasicLogger.debug();
        BasicLogger.debug("Element-wise iteration");
        for (ElementView1D<Double, ?> element : reshaped.elements()) {
            BasicLogger.debug("ElementView {}: {}", element.index(), element);
        }

        BasicLogger.debug();
        BasicLogger.debug("Vector-wise iteration");
        for (VectorView<Double> vector : reshaped.vectors()) {
            BasicLogger.debug("VectorView {}: {}", vector.index(), vector);
        }

        BasicLogger.debug();
        BasicLogger.debug("Matrix-wise iteration");
        for (MatrixView<Double> matrix : reshaped.matrices()) {
            BasicLogger.debug("MatrixView {}: {}", matrix.index(), matrix);
        }
    }

}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment