
.NET: Neural Network, Supervised Deep Machine Learning Example in C#

An example neural network and deep learning library written in C#; it works with practically any data as long as the data is properly normalized and the network is adequately trained.

Supervised learning is a machine learning paradigm for problems where the available data consists of labeled examples, meaning that each data point contains features (covariates) and an associated label. The goal of supervised learning algorithms is to learn a function that maps feature vectors (inputs) to labels (outputs), based on example input-output pairs. With MNIST, for example, each data point is a 784-pixel image (the features) paired with the digit from 0 to 9 that it depicts (the label).

This neural network code is also available as a C++ library.

See the unsupervised learning version. Also, see the convolutional neural network example.

Learn about feedforward neural networks. Learn more about the lambda parameter.

Visit the playground for related examples.

This network supports both Categorical Cross-Entropy (CCE) and Sparse Categorical Cross-Entropy (SCCE). For CCE, supply a one-hot vector as the desired output and use a SoftMax output activation; for SCCE, supply the index of the true class in the output layer and use a linear output activation.
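
For illustration, here is a minimal sketch of the two target formats using this library's Matrix class; the ten-class output and the label 3 are assumptions made for the example:

// CCE: the desired output is a one-hot column vector, one row per class
var cceTarget = new Matrix(10, 1); // ten classes, all zeros
cceTarget.SetValue(3, 0, 1.0);     // class 3 is the true label
// pair with a SoftMax output activation, e.g. ActivationReLUSoftMax

// SCCE: the desired output is a single value holding the index of the true class
var scceTarget = new Matrix(1, 1);
scceTarget.SetValue(0, 0, 3.0);    // the same label, stored as an index
// pair with a linear output activation, e.g. ActivationReLULinear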

Download these files, including code that shows how to train the network, plus the MNIST image files of handwritten digits with their labels: NEURALNETWORK.zip. Experiment with the number of neurons and layers. The example usage code requires the SixLabors.ImageSharp NuGet package.
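
Before the full listing, here is a hedged sketch of typical usage; the 784-16-16-10 layer sizes, the hyperparameters, and the trainingInputs, trainingTargets and someInput variables are illustrative assumptions rather than the exact contents of the download:

// a minimal training sketch for MNIST-style data (namespace ML assumed imported)
var activation = new ActivationLeakyReLUSoftMax();
var network = new NeuralNetwork(new uint[] { 784, 16, 16, 10 }, activation, clipThreshold: 5.0);

// trainingInputs / trainingTargets are assumed: 784x1 input vectors normalized to [0, 1]
// and 10x1 one-hot target vectors (CCE) loaded from the MNIST files in the download
const int epochCount = 10;
const double initialLearningRate = 0.05, lambda = 1e-5;
for (int epoch = 0; epoch < epochCount; epoch++)
{
	// decay the learning rate each epoch, as suggested in the code comments
	double learningRate = Math.Pow(0.1, epoch / (double)epochCount) * initialLearningRate;
	network.Train(trainingInputs, trainingTargets, activation, learningRate, lambda);
}

uint predicted = network.PredictedIndex(someInput, activation); // classify one 784x1 sample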

The neural network code:


// NOTE: This network only supports forms of Categorical Cross-Entropy loss a.k.a. SoftMax loss for multi-class classification.
// NOTE: This network supports "Sparse Categorical Cross-Entropy" (SCCE) which requires a Linear final layer activation.
// NOTE: SCCE requires much less memory for networks with a large number of outputs, like NLP networks, than having to
// create a one-hot array for each output. SCCE networks are more efficient with resources.
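// NOTE: For example, a language model with a 50,000-word vocabulary would need a 50,000-element
// one-hot vector per sample under CCE, but only a single index under SCCE.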

// CceNeuralNetwork.cs
// Compatible with .NET Core 3.1 and later
using System;
using System.IO;
using System.Collections.Generic;
using System.Threading;

namespace ML
{
	public class NeuralNetwork
	{
		public List<Matrix> Weights { get; set; }

		public List<Matrix> Biases { get; set; }

		public int LayerCount { get; set; }

		public double ClipThreshold { get; set; }

		public NeuralNetwork()
		{
			Biases = new List<Matrix>();
			Weights = new List<Matrix>();
		}

		// clipThreshold may be needed when working with large amounts of data, such as image recognition with a CNN
		public NeuralNetwork(uint[] neuronCounts, IActivationMethods activationMethods, double clipThreshold = 0.0, bool biases = true)
		{
			Biases = new List<Matrix>();
			if (biases)
				for (int i = 1; i < neuronCounts.Length; i++)
				{
					if (neuronCounts[i] == 0)
						throw new ArgumentException("Neuron count cannot be 0.");
					Biases.Add(new Matrix(neuronCounts[i], 1));
				}

			Weights = new List<Matrix>();
			for (int i = 0; i < neuronCounts.Length - 1; i++)
			{
				if (neuronCounts[i + 1] == 0 || neuronCounts[i] == 0)
					throw new ArgumentException("Neuron count cannot be 0.");
				Weights.Add(new Matrix(neuronCounts[i + 1], neuronCounts[i]));
			}

			LayerCount = neuronCounts.Length;

			ClipThreshold = clipThreshold;

			activationMethods.Randomize(Weights, Biases);
		}

		// lambda is the L2 regularization coefficient and should be a very small fraction (between zero and one) to help prevent overfitting and exploding gradients
		// at zero it provides no regularization and risks exploding gradients; in that case use a clipThreshold, such as 5.0
		// training might slow down with multiple threads because each thread's mini-batch is smaller and therefore has less to learn from; consider decreasing the number of threads as the epoch count increases and experimenting with the learning rate
		// learningRate can decay per epoch with a schedule such as: Math.Pow(0.1, epoch / (double)epochCount) * initialLearningRate
		public void Train(List<Matrix> givenInputs, List<Matrix> desiredOutputs, IActivationMethods activationObject, double learningRate, double lambda, int threadCount = 0)
		{
			if (givenInputs.Count != desiredOutputs.Count)
				throw new ArgumentException("\"givenInputs\" count must match \"desiredOutputs\" count.");

			if (threadCount < 1)
				threadCount = Environment.ProcessorCount;
			int mini_batch_size = givenInputs.Count / threadCount;
			if (mini_batch_size > 0)
			{
				var bps = new List<BatchParams>();
				for (int x = 0; x < threadCount; x++)
				{
					var bp = new BatchParams()
					{
						network = this,
						givenInputs = givenInputs.GetRange(x * mini_batch_size, mini_batch_size),
						desiredOutputs = desiredOutputs.GetRange(x * mini_batch_size, mini_batch_size),
						local_weights = new List<Matrix>(),
						local_biases = new List<Matrix>(),
						activationObject = activationObject,
						learningRate = learningRate,
						lambda = lambda,
						delta_gradient_w = new Matrix[Weights.Count],
						delta_gradient_b = new Matrix[Biases.Count],
						thread = new Thread(TrainMiniBatch) { IsBackground = true },
						threadCount = threadCount
					};

					foreach (var weight in Weights)
						bp.local_weights.Add(new Matrix(weight));

					foreach (var bias in Biases)
						bp.local_biases.Add(new Matrix(bias));

					bps.Add(bp);
					bp.thread.Start(bp);
				}
				// wait for all mini-batch threads to finish
				foreach (var bp in bps)
				{
					bp.thread?.Join();
					bp.thread = null;
				}

				for (int x = 0; x < Weights.Count; x++)
				{
					var weights = new List<Matrix>();
					for (int i = 0; i < bps.Count; i++)
						weights.Add(bps[i].local_weights[x]);
					ParameterAveraging(Weights[x], weights);
				}

				for (int x = 0; x < Biases.Count; x++)
				{
					var biases = new List<Matrix>();
					for (int i = 0; i < bps.Count; i++)
						biases.Add(bps[i].local_biases[x]);
					ParameterAveraging(Biases[x], biases);
				}
			}
			for (int x = threadCount * mini_batch_size; x < givenInputs.Count; x++)
				Train(givenInputs[x], desiredOutputs[x], activationObject, learningRate, lambda);
		}

		// lambda is the L2 regularization coefficient and should be a very small fraction (between zero and one) to help prevent overfitting and exploding gradients
		// at zero it provides no regularization and risks exploding gradients; in that case use a clipThreshold, such as 5.0
		// learningRate can decay per epoch with a schedule such as: Math.Pow(0.1, epoch / (double)epochCount) * initialLearningRate
		public void Train(Matrix givenInputs, Matrix desiredOutputs, IActivationMethods activationObject, double learningRate, double lambda)
		{
			var delta_gradient_w = new Matrix[Weights.Count];
			var delta_gradient_b = new Matrix[Biases.Count];

			BackPropagate(this, Weights, Biases, givenInputs, desiredOutputs, activationObject, delta_gradient_w, delta_gradient_b);
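			// SGD update with L2 weight decay, applied below:
			//   w <- (1 - learningRate * lambda) * w - learningRate * gradient_w
			//   b <- b - learningRate * gradient_b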

			var new_weights = new List<Matrix>();
			var new_biases = new List<Matrix>();

			for (int i = 0; i < delta_gradient_w.Length; i++)
			{
				for (uint j = 0; j < delta_gradient_w[i].rows; j++)
					for (uint k = 0; k < delta_gradient_w[i].columns; k++)
					{
						double w = Weights[i].GetValue(j, k);
						double nw = delta_gradient_w[i].GetValue(j, k);
						delta_gradient_w[i].SetValue(j, k, (1 - learningRate * lambda) * w - learningRate * nw);
					}
				new_weights.Add(delta_gradient_w[i]);
			}

			for (int i = 0; i < delta_gradient_b.Length; i++)
			{
				for (uint j = 0; j < delta_gradient_b[i].rows; j++)
				{
					double b = Biases[i].GetValue(j, 0);
					double nb = delta_gradient_b[i].GetValue(j, 0);
					delta_gradient_b[i].SetValue(j, 0, b - learningRate * nb);
				}
				new_biases.Add(delta_gradient_b[i]);
			}

			Weights = new_weights;
			Biases = new_biases;

		}
		public uint TrueIndex(Matrix desiredOutputs)
		{
			uint index;
			if (desiredOutputs.rows == 1) // SCCE: the single value holds the index of the true class
				index = (uint)desiredOutputs.GetValue(0, 0);
			else
				index = Functions.GetIndexMax(desiredOutputs);
			return index;
		}

		public uint PredictedIndex(Matrix givenInputs, IActivationMethods activationObject)
		{
			Matrix ff = FeedForward(givenInputs, activationObject);
			return Functions.GetIndexMax(ff);
		}

		public double CalculateLoss(Matrix givenInputs, Matrix desiredOutputs, IActivationMethods activationObject, out uint predictedIndex, out uint trueIndex)
		{
			uint iPredicted, iTrue;
			Matrix ff = FeedForward(givenInputs, activationObject);
			if (desiredOutputs.rows == 1 && givenInputs.rows != desiredOutputs.rows) // Calculate SCCE (Linear)
			{
				Functions.SoftMax(ff);
				iPredicted = Functions.GetIndexMax(ff);
				iTrue = (uint)desiredOutputs.GetValue(0, 0); // holds the index of the true element
			}
			else
			{
				iPredicted = Functions.GetIndexMax(ff);
				iTrue = Functions.GetIndexMax(desiredOutputs);
			}
			// cross-entropy loss of the predicted class; when the prediction is wrong,
			// the loss is capped at 1.0 rather than evaluating -Math.Log(0)
			double loss, a = (iPredicted == iTrue) ? ff.GetValue(iPredicted, 0) : 0;
			if (a == 0)
				loss = 1.0;
			else if (a == 1)
				loss = 0.0;
			else
				loss = -Math.Log(a);
			predictedIndex = iPredicted;
			trueIndex = iTrue;
			return loss;
		}

		public Matrix FeedForward(Matrix givenInputs, IActivationMethods activationObject)
		{
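			// layer-by-layer pass: a = activation(W * a + b); hidden layers use
			// ActivationMethod, the final layer uses OutputActivationMethod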
			for (int i = 0; i < LayerCount - 1; i++)
			{
				Matrix? temp;
				Matrix.Multiply(Weights[i], givenInputs, out temp);
				if (temp == null)
					throw new ArgumentException("Cannot multiply matrices.");

				if (Biases.Count > 0) // add bias
				{
					for (uint j = 0; j < temp.rows; j++)
						for (uint k = 0; k < temp.columns; k++)
							temp.SetValue(j, k, temp.GetValue(j, k) + Biases[i].GetValue(j, 0));
				}

				if (i < LayerCount - 2)
					activationObject.ActivationMethod(temp);
				else
					activationObject.OutputActivationMethod(temp);
				givenInputs = temp;
			}
			return givenInputs;
		}

		#region Private_Decl
		private static void TrainMiniBatch(object? o)
		{
			var bp = o as BatchParams ?? throw new ArgumentNullException(nameof(o));

			for (int x = 0; x < bp.givenInputs.Count; x++)
			{
				BackPropagate(bp.network, bp.local_weights, bp.local_biases, bp.givenInputs[x], bp.desiredOutputs[x], bp.activationObject, bp.delta_gradient_w, bp.delta_gradient_b);

				for (int i = 0; i < bp.delta_gradient_w.Length; i++)
				{
					for (uint row = 0; row < bp.delta_gradient_w[i].rows; row++)
						for (uint column = 0; column < bp.delta_gradient_w[i].columns; column++)
						{
							double w = bp.local_weights[i].GetValue(row, column);
							double nw = bp.delta_gradient_w[i].GetValue(row, column);
							bp.local_weights[i].SetValue(row, column, (1 - bp.learningRate * bp.lambda) * w - (bp.learningRate / bp.threadCount) * nw);
						}
				}

				for (int i = 0; i < bp.delta_gradient_b.Length; i++)
				{
					for (uint row = 0; row < bp.delta_gradient_b[i].rows; row++)
					{
						double b = bp.local_biases[i].GetValue(row, 0);
						double nb = bp.delta_gradient_b[i].GetValue(row, 0);
						bp.local_biases[i].SetValue(row, 0, b - (bp.learningRate / bp.threadCount) * nb);
					}
				}
			}
		}

		private static void ParameterAveraging(Matrix globalParameters, List<Matrix> localParametersOfThreads)
		{
			// Initialize a temporary matrix to hold the sum of local parameters
			Matrix sumOfLocalParams = new Matrix(globalParameters.rows, globalParameters.columns);
			for (int threadId = 0; threadId < localParametersOfThreads.Count; threadId++)
				for (uint row = 0; row < localParametersOfThreads[threadId].rows; row++)
					for (uint column = 0; column < localParametersOfThreads[threadId].columns; column++)
						sumOfLocalParams.SetValue(row, column, sumOfLocalParams.GetValue(row, column) + localParametersOfThreads[threadId].GetValue(row, column));
			// Update the global parameter matrix using parameter averaging formula
			for (uint row = 0; row < globalParameters.rows; ++row)
				for (uint column = 0; column < globalParameters.columns; ++column)
					globalParameters.SetValue(row, column, sumOfLocalParams.GetValue(row, column) / localParametersOfThreads.Count);
		}

		// uses Stochastic Gradient Descent
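		// backpropagation computes, for each layer:
		//   output layer: delta = activations - desiredOutputs (see Cost.Delta)
		//   hidden layers: delta = transpose(W[next]) * delta[next], elementwise-multiplied by Derivative(z)
		//   gradients: delta_gradient_w = delta * transpose(activations[previous]), delta_gradient_b = delta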
		private static void BackPropagate(NeuralNetwork network, List<Matrix> Weights, List<Matrix> Biases, Matrix givenInputs, Matrix desiredOutputs, IActivationMethods activationObject, Matrix[] delta_gradient_w, Matrix[] delta_gradient_b)
		{
			Matrix activation = givenInputs;

			List<Matrix> activations = new List<Matrix> { activation };

			List<Matrix> zs = new List<Matrix>();

			// feed forward
			for (int i = 0; i < network.LayerCount - 1; i++)
			{
				Matrix? z;
				Matrix.Multiply(Weights[i], activation, out z);
				if (z == null)
					throw new ArgumentException("Cannot multiply matrices.");

				if (Biases.Count > 0) // add bias
				{
					for (uint j = 0; j < z.rows; j++)
						for (uint k = 0; k < z.columns; k++)
							z.SetValue(j, k, z.GetValue(j, k) + Biases[i].GetValue(j, 0));
				}

				zs.Add(new Matrix(z));

				if (i < network.LayerCount - 2)
					activationObject.ActivationMethod(z);
				else
					activationObject.OutputActivationMethod(z);
				activation = z;
				activations.Add(activation);
			}

			// backward pass
			Matrix delta = new Matrix(activations[^1].rows, activations[^1].columns);

			Cost.Delta(activations[^1], desiredOutputs, delta);

			if (delta_gradient_b.Length > 0)
				delta_gradient_b[^1] = new Matrix(delta);

			Matrix transposed = new Matrix(activations[^2]);
			transposed.Transpose();

			Matrix? temp;
			Matrix.Multiply(delta, transposed, out temp);
			if (temp == null)
				throw new ArgumentException("Cannot multiply matrices.");
			delta_gradient_w[^1] = temp;

			for (int i = 2; i < network.LayerCount; i++)
			{
				var t = network.LayerCount - i;
				transposed = new Matrix(Weights[t]);
				transposed.Transpose();

				Matrix.Multiply(transposed, delta, out temp);
				if (temp == null)
					throw new ArgumentException("Cannot multiply matrices.");

				// multiply the derivative function on "temp"
				Matrix z = zs[^i];
				for (uint j = 0; j < temp.rows; j++)
					for (uint k = 0; k < temp.columns; k++)
						temp.SetValue(j, k, temp.GetValue(j, k) * activationObject.Derivative(z.GetValue(j, 0)));

				delta.Copy(temp);

				if (delta_gradient_b.Length > 0)
					delta_gradient_b[^i] = temp;

				t = network.LayerCount - i - 1;
				transposed = new Matrix(activations[t]);
				transposed.Transpose();
				Matrix.Multiply(delta, transposed, out temp);
				if (temp == null)
					throw new ArgumentException("Cannot multiply matrices.");

				delta_gradient_w[^i] = temp;
			}
			if (network.ClipThreshold > 0) // a threshold greater than zero clips the gradient norm to prevent exploding gradients
			{
				double gradients_norm, scale_factor;

				gradients_norm = 0;
				for (int i = 0; i < delta_gradient_b.Length; i++)
					for (uint j = 0; j < delta_gradient_b[i].rows; j++)
						for (uint k = 0; k < delta_gradient_b[i].columns; k++)
							gradients_norm += delta_gradient_b[i].GetValue(j, k) * delta_gradient_b[i].GetValue(j, k);
				gradients_norm = Math.Sqrt(gradients_norm);
				if (gradients_norm > network.ClipThreshold)
				{
					scale_factor = network.ClipThreshold / gradients_norm;
					for (uint i = 0; i < delta_gradient_b.Length; i++)
						for (uint j = 0; j < delta_gradient_b[i].rows; j++)
							for (uint k = 0; k < delta_gradient_b[i].columns; k++)
								delta_gradient_b[i].SetValue(j, k, delta_gradient_b[i].GetValue(j, k) * scale_factor);
				}

				// weights
				gradients_norm = 0;
				for (uint i = 0; i < delta_gradient_w.Length; i++)
					for (uint j = 0; j < delta_gradient_w[i].rows; j++)
						for (uint k = 0; k < delta_gradient_w[i].columns; k++)
							gradients_norm += delta_gradient_w[i].GetValue(j, k) * delta_gradient_w[i].GetValue(j, k);

				gradients_norm = Math.Sqrt(gradients_norm);
				if (gradients_norm > network.ClipThreshold)
				{
					scale_factor = network.ClipThreshold / gradients_norm;
					for (uint i = 0; i < delta_gradient_w.Length; i++)
						for (uint j = 0; j < delta_gradient_w[i].rows; j++)
							for (uint k = 0; k < delta_gradient_w[i].columns; k++)
								delta_gradient_w[i].SetValue(j, k, delta_gradient_w[i].GetValue(j, k) * scale_factor);
				}
			}
		}

		public string ToJson()
		{
			return System.Text.Json.JsonSerializer.Serialize(this);
		}

		public static NeuralNetwork? FromJson(Stream jsonStream)
		{
			return System.Text.Json.JsonSerializer.Deserialize<NeuralNetwork>(jsonStream);
		}

		public static NeuralNetwork? FromJson(string json)
		{
			return System.Text.Json.JsonSerializer.Deserialize<NeuralNetwork>(json);
		}
		#endregion

	}

	internal static class Randomization // play with learning rate when switching between these randomizations
	{
		static Random rand = new Random();
		private static double GetDouble()
		{
			return rand.NextDouble();
		}
		public static void RandomizeHeNormal(List<Matrix> Weights, List<Matrix> Biases)
		{
			for (int a = 0; a < Weights.Count; a++)
			{
				double init = Math.Sqrt(2.0 / Weights[a].columns); // He scale, sqrt(2/fanIn), good for ReLU; the uniform draw below approximates He normal initialization

				for (uint i = 0; i < Weights[a].rows; i++)
					for (uint j = 0; j < Weights[a].columns; j++)
						Weights[a].SetValue(i, j, GetDouble() * init - init * 0.5);
			}
			for (int a = 0; a < Biases.Count; a++)
				for (uint i = 0; i < Biases[a].rows; i++)
					Biases[a].SetValue(i, 0, 0.01);
		}
		public static void RandomizeGlorotXavier(List<Matrix> Weights, List<Matrix> Biases)
		{
			for (int a = 0; a < Weights.Count; a++)
			{
				double init = Math.Sqrt(6.0 / (Weights[a].columns + Weights[a].rows)); // Glorot/Xavier uniform bound, sqrt(6/(fanIn+fanOut)), good for Tanh/Sigmoid activation

				for (uint i = 0; i < Weights[a].rows; i++)
					for (uint j = 0; j < Weights[a].columns; j++)
						Weights[a].SetValue(i, j, GetDouble() * init - init * 0.5);
			}
			for (int a = 0; a < Biases.Count; a++)
				for (uint i = 0; i < Biases[a].rows; i++)
					Biases[a].SetValue(i, 0, 0.01);
		}
	}

	internal class BatchParams
	{
		public NeuralNetwork network;
		public List<Matrix> givenInputs, desiredOutputs, local_weights;
		public List<Matrix> local_biases;
		public IActivationMethods activationObject;
		public Matrix[] delta_gradient_w;
		public Matrix[] delta_gradient_b;
		public double learningRate;
		public double lambda;
		public Thread? thread;
		public int threadCount;
	}

	public class Matrix
	{
		public double[] data { get; set; }
		public uint rows { get; set; }
		public uint columns { get; set; }
		public Matrix()
		{
			data = new double[0];
		}
		public Matrix(uint rows, uint columns)
		{
			this.rows = rows;
			this.columns = columns;
			data = new double[rows * columns];
		}
		public Matrix(uint rows, uint columns, double[] values)
		{
			this.rows = rows;
			this.columns = columns;
			data = new double[rows * columns];
			Array.Copy(values, data, rows * columns);
		}
		public Matrix(Matrix m)
		{
			rows = m.rows;
			columns = m.columns;
			data = new double[rows * columns];
			Array.Copy(m.data, data, rows * columns);
		}
		public void Transpose()
		{
			var result = new Matrix(columns, rows);
			for (uint c = 0; c < columns; c++)
				for (uint r = 0; r < rows; r++)
					result.SetValue(c, r, GetValue(r, c));
			Copy(result);
		}
		public static void Add(Matrix M, double V)
		{
			for (uint i = 0; i < M.rows; i++)
				for (uint j = 0; j < M.columns; j++)
					M.SetValue(i, j, M.GetValue(i, j) + V);
		}
		public static void Multiply(Matrix A, Matrix B, out Matrix? C) // this is part of the dot product
		{
			C = null;
			if (A.columns == B.rows)
			{
				uint n = A.columns;
				C = new Matrix(A.rows, B.columns);
				for (uint i = 0; i < C.rows; i++)
					for (uint j = 0; j < C.columns; j++)
					{
						for (uint k = 0; k < n; k++)
							C.SetValue(i, j, C.GetValue(i, j) + A.GetValue(i, k) * B.GetValue(k, j));
					}
			}
		}
		private static readonly Random dropoutRandom = new Random(); // shared so rapid repeated calls are not reseeded identically
		public void Dropout(double dropoutRate) // zero each element with probability dropoutRate
		{
			// note: plain (non-inverted) dropout; surviving values are not scaled by 1/(1 - dropoutRate)
			for (uint i = 0; i < rows; i++)
				for (uint j = 0; j < columns; j++)
					if (dropoutRandom.NextDouble() < dropoutRate)
						SetValue(i, j, 0.0);
		}
		public double GetValue(uint row, uint column)
		{
			return data[row * columns + column];
		}
		public double SetValue(uint row, uint column, double value)
		{
			return data[row * columns + column] = value;
		}
		public void Copy(Matrix m)
		{
			if (rows * columns != m.rows * m.columns)
				data = new double[m.rows * m.columns];
			rows = m.rows;
			columns = m.columns;
			Array.Copy(m.data, data, rows * columns);
		}
	}
	public interface IActivationMethods
	{
		public void ActivationMethod(Matrix outputs);
		public void OutputActivationMethod(Matrix outputs);
		public double Derivative(double input);
		//public void OutputDerivative(Matrix z, Matrix derivatives);
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases);
	}
	public class ActivationReLUSoftMax : IActivationMethods
	{
		public void ActivationMethod(Matrix outputs) // Rectified Linear Unit function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.ReLU(outputs.GetValue(i, j)));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			Functions.SoftMax(outputs);
		}
		public double Derivative(double input)
		{
			return Functions.ReLUPrime(input);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives) // this is the SoftMax derivative and this may not work with the Quadratic Cost function
		//{
		//	double d;
		//	for (uint i = 0; i < z.rows; i++)
		//	{
		//		d = z.GetValue(i, 0);
		//		derivatives.SetValue(i, 0, d * (1.0 - d));
		//	}
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeHeNormal(Weights, Biases);
		}
	}
	public class ActivationELUSoftMax : IActivationMethods
	{
		public readonly double alpha;
		public ActivationELUSoftMax(double alpha = 1.0)
		{
			this.alpha = alpha;
		}
		public void ActivationMethod(Matrix outputs) // Exponential Linear Unit function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.ELU(outputs.GetValue(i, j), alpha));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			Functions.SoftMax(outputs);
		}
		public double Derivative(double input)
		{
			return Functions.ELUPrime(input, alpha);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives) // this is the SoftMax derivative and this may not work with the Quadratic Cost function
		//{
		//	double d;
		//	for (uint i = 0; i < z.rows; i++)
		//	{
		//		d = z.GetValue(i, 0);
		//		derivatives.SetValue(i, 0, d * (1.0 - d));
		//	}
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeHeNormal(Weights, Biases);
		}
	}
	public class ActivationLeakyReLUSoftMax : IActivationMethods
	{
		public readonly double alpha;
		public ActivationLeakyReLUSoftMax(double alpha = 0.1)
		{
			this.alpha = alpha;
		}
		public void ActivationMethod(Matrix outputs) // Leaky Rectified Linear Unit function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.LeakyReLU(outputs.GetValue(i, j), alpha));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			Functions.SoftMax(outputs);
		}
		public double Derivative(double input)
		{
			return Functions.LeakyReLUPrime(input, alpha);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives) // this is the SoftMax derivative and this may not work with the Quadratic Cost function
		//{
		//	double d;
		//	for (uint i = 0; i < z.rows; i++)
		//	{
		//		d = z.GetValue(i, 0);
		//		derivatives.SetValue(i, 0, d * (1.0 - d));
		//	}
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeHeNormal(Weights, Biases);
		}
	}
	public class ActivationTanhSoftMax : IActivationMethods
	{
		public void ActivationMethod(Matrix outputs) // Tanh function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.Tanh(outputs.GetValue(i, j)));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			Functions.SoftMax(outputs);
		}
		public double Derivative(double input)
		{
			return Functions.TanhPrime(input);
		}
		public void OutputDerivative(Matrix z, Matrix derivatives) // this is the SoftMax derivative and this may not work with the Quadratic Cost function
		{
			double d;
			for (uint i = 0; i < z.rows; i++)
			{
				d = z.GetValue(i, 0);
				derivatives.SetValue(i, 0, d * (1.0 - d));
			}
		}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeGlorotXavier(Weights, Biases);
		}
	}
	public class ActivationSigmoidSoftMax : IActivationMethods
	{
		public void ActivationMethod(Matrix outputs) // Sigmoid function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.Sigmoid(outputs.GetValue(i, j)));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			Functions.SoftMax(outputs);
		}
		public double Derivative(double input)
		{
			return Functions.SigmoidPrime(input);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives) // this is the SoftMax derivative and this may not work with the Quadratic Cost function
		//{
		//	double d;
		//	for (uint i = 0; i < z.rows; i++)
		//	{
		//		d = z.GetValue(i, 0);
		//		derivatives.SetValue(i, 0, d * (1.0 - d));
		//	}
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeGlorotXavier(Weights, Biases);
		}
	}
	public class ActivationReLULinear : IActivationMethods
	{
		public void ActivationMethod(Matrix outputs) // Rectified Linear Unit function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.ReLU(outputs.GetValue(i, j)));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			// Linear
		}
		public double Derivative(double input)
		{
			return Functions.ReLUPrime(input);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives)
		//{
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeHeNormal(Weights, Biases);
		}
	}
	public class ActivationELULinear : IActivationMethods
	{
		public readonly double alpha;
		public ActivationELULinear(double alpha = 1.0)
		{
			this.alpha = alpha;
		}
		public void ActivationMethod(Matrix outputs) // Exponential Linear Unit function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.ELU(outputs.GetValue(i, j), alpha));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			// Linear
		}
		public double Derivative(double input)
		{
			return Functions.ELUPrime(input, alpha);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives)
		//{
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeHeNormal(Weights, Biases);
		}
	}
	public class ActivationLeakyReLULinear : IActivationMethods
	{
		public readonly double alpha;
		public ActivationLeakyReLULinear(double alpha = 0.1)
		{
			this.alpha = alpha;
		}
		public void ActivationMethod(Matrix outputs) // Leaky Rectified Linear Unit function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.LeakyReLU(outputs.GetValue(i, j), alpha));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			// Linear
		}
		public double Derivative(double input)
		{
			return Functions.LeakyReLUPrime(input, alpha);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives)
		//{
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeHeNormal(Weights, Biases);
		}
	}
	public class ActivationTanhLinear : IActivationMethods
	{
		public void ActivationMethod(Matrix outputs) // Tanh function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.Tanh(outputs.GetValue(i, j)));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			// Linear
		}
		public double Derivative(double input)
		{
			return Functions.TanhPrime(input);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives)
		//{
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeGlorotXavier(Weights, Biases);
		}
	}
	public class ActivationSigmoidLinear : IActivationMethods
	{
		public void ActivationMethod(Matrix outputs) // Sigmoid function applied to whole matrix
		{
			for (uint i = 0; i < outputs.rows; i++)
				for (uint j = 0; j < outputs.columns; j++)
					outputs.SetValue(i, j, Functions.Sigmoid(outputs.GetValue(i, j)));
		}
		public void OutputActivationMethod(Matrix outputs)
		{
			// Linear
		}
		public double Derivative(double input)
		{
			return Functions.SigmoidPrime(input);
		}
		//public void OutputDerivative(Matrix z, Matrix derivatives)
		//{
		//}
		public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
		{
			Randomization.RandomizeGlorotXavier(Weights, Biases);
		}
	}
	//public class ActivationSigmoid : IActivationMethods
	//{
	//	public void ActivationMethod(Matrix outputs) // Sigmoid function applied to whole matrix
	//	{
	//		for (uint i = 0; i < outputs.rows; i++)
	//			for (uint j = 0; j < outputs.columns; j++)
	//				outputs.SetValue(i, j, Functions.Sigmoid(outputs.GetValue(i, j)));
	//	}
	//	public void OutputActivationMethod(Matrix outputs)
	//	{
	//		ActivationMethod(outputs);
	//	}
	//	public double Derivative(double input)
	//	{
	//		return Functions.SigmoidPrime(input);
	//	}
	//	public void OutputDerivative(Matrix z, Matrix derivatives)
	//	{
	//		for (uint i = 0; i < z.rows; i++)
	//			derivatives.SetValue(i, 0, Functions.SigmoidPrime(z.GetValue(i, 0)));
	//	}
	//	public void Randomize(List<Matrix> Weights, List<Matrix> Biases)
	//	{
	//		Randomization.RandomizeGlorotXavier(Weights, Biases);
	//	}
	//}
	static class Cost
	{
		public static void Delta(Matrix outputs, Matrix desiredOutputs, Matrix deltaValue)
		{
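			// for a SoftMax output trained with cross-entropy, the output-layer error
			// simplifies to delta = outputs - desiredOutputs; both branches below compute this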
			if (desiredOutputs.rows == 1 && deltaValue.rows != desiredOutputs.rows) // "Sparse Categorical Cross-Entropy" (SCCE): desiredOutputs holds the index of the true class; requires a Linear final activation, and the caller must FeedForward(givenInputs, activationObject), apply SoftMax to the result, then take the index of its maximum
			{
				uint desiredIndex = (uint)desiredOutputs.GetValue(0, 0);
				for (uint i = 0; i < deltaValue.rows; i++)
					deltaValue.SetValue(i, 0, outputs.GetValue(i, 0) - (i == desiredIndex ? 1 : 0));
			}
			else // "Categorical Cross-Entropy" (CCE): requires a one-hot vector for the desired outputs, which is not memory efficient for many classes
			{
				for (uint i = 0; i < deltaValue.rows; i++)
					deltaValue.SetValue(i, 0, outputs.GetValue(i, 0) - desiredOutputs.GetValue(i, 0));
			}
		}
		public static uint Index(Matrix feedForwardOutputs)
		{
			// Sparse Categorical Cross-Entropy
			Functions.SoftMax(feedForwardOutputs);
			return Functions.GetIndexMax(feedForwardOutputs);
		}
		public static double Loss(Matrix feedForwardOutputs, Matrix desiredOutputs)
		{
			// Categorical Cross-Entropy; desiredOutputs should be a one-hot vector with the same number of rows (classes) as feedForwardOutputs
			// when the predicted class is wrong, the loss is capped at 1 rather than evaluating -Math.Log(0)
			uint iO = Functions.GetIndexMax(feedForwardOutputs), iL = Functions.GetIndexMax(desiredOutputs);
			double a = (iO == iL) ? feedForwardOutputs.GetValue(iO, 0) : 0;
			if (a == 0)
				return 1;
			if (a == 1)
				return 0;
			return -Math.Log(a);
		}
	}
	public static class Functions
	{
		public static double Linear(double x) // Linear function
		{
			return x;
		}
		public static double LinearPrime() // derivative of Linear function (the line's slope)
		{
			return 1;
		}
		// a common alpha default is 0.01, but it can be made bigger or smaller; TensorFlow uses 0.2 while Keras uses 0.3
		public static double LeakyReLU(double x, double alpha) // Rectified Linear Unit function (Leaky variant)
		{
			return x >= 0 ? x : (alpha * x);
		}
		public static double LeakyReLUPrime(double x, double alpha) // derivative of Leaky ReLU function
		{
			return x >= 0 ? 1 : alpha;
		}
		public static double ReLU(double x) // Rectified Linear Unit function
		{
			return x > 0 ? x : 0;
		}
		public static double ReLUPrime(double x) // derivative of ReLU function
		{
			return x > 0 ? 1 : 0;
		}
		public static double ELU(double x, double alpha) // Exponential Linear Unit function
		{
			return x >= 0 ? x : (alpha * (Math.Exp(x) - 1));
		}
		public static double ELUPrime(double x, double alpha) // derivative of ELU function
		{
			return x >= 0 ? 1 : (alpha * Math.Exp(x));
		}
		public static double Tanh(double x)
		{
			return Math.Tanh(x); // hyperbolic tangent; Math.Tanh is numerically stable for large |x|
		}
		public static double TanhPrime(double x) // derivative of Tanh function: 1 - tanh(x)^2
		{
			double t = Math.Tanh(x);
			return 1 - t * t;
		}
		public static double Sigmoid(double x)
		{
			return 1.0 / (1 + Math.Exp(-x));
		}
		public static double SigmoidPrime(double x) // derivative of Sigmoid function
		{
			return (1.0 / (1 + Math.Exp(-x))) * (1.0 - (1.0 / (1 + Math.Exp(-x)))); // this is simply: Sigmoid(x) * (1.0 - Sigmoid(x))
		}
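		// numerically stable SoftMax: subtracting the maximum before exponentiating
		// prevents overflow in Math.Exp without changing the result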
		public static void SoftMax(Matrix input)
		{
			double max = input.GetValue(0, 0);

			for (uint i = 1; i < input.rows; i++)
				max = Math.Max(max, input.GetValue(i, 0));

			double val, sum = 0;
			for (uint i = 0; i < input.rows; i++)
			{
				val = Math.Exp(input.GetValue(i, 0) - max);
				input.SetValue(i, 0, val);
				sum += val;
			}

			for (uint i = 0; i < input.rows; i++)
			{
				val = input.GetValue(i, 0) / sum;
				input.SetValue(i, 0, val);
			}
		}
		public static uint GetIndexMax(Matrix m) // returns the row index of the largest value; pass column vectors (n x 1) only
		{
			uint index = 0;
			double a, max = m.GetValue(0, 0);
			for (uint i = 1; i < m.rows; i++)
			{
				a = m.GetValue(i, 0);
				if (a > max)
				{
					max = a;
					index = i;
				}
			}
			return index;
		}
	}
}
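
A trained network can be saved and restored with the JSON helpers; Weights, Biases, LayerCount and ClipThreshold are public properties, so System.Text.Json round-trips them. A minimal sketch (the file name is an arbitrary choice):

// save a trained network
File.WriteAllText("network.json", network.ToJson());

// restore it later
NeuralNetwork? restored;
using (var stream = File.OpenRead("network.json"))
	restored = NeuralNetwork.FromJson(stream);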

The confusion matrix code:


using System;
using System.Collections.Generic;

namespace ML
{
	public class Confusion
	{
		public List<Dictionary<string, string>> Samples { get; set; }
		public IEnumerable<string> Categories { get; set; }
        private Confusion()
		{
			Samples = new List<Dictionary<string, string>>();
			Categories = Array.Empty<string>();
		}
		public Confusion(IEnumerable<string> categories)
		{
			Samples = new List<Dictionary<string, string>>();
			Categories = categories;
		}
		public string ToJson()
		{
			return System.Text.Json.JsonSerializer.Serialize(this);
		}
		public void AddSample(string truth, string? label = null)
		{
			Samples.Add(new Dictionary<string, string>
			{
				{ "t", truth }, // t for true value
				{ "l", label ?? "?" } // l for label
			});
		}
		public void Reset()
		{
			Samples.Clear();
		}
		public string GetHtmlPage()
		{
			return $"<!DOCTYPE html><html lang='en'><head><meta charset='UTF-8' /><meta name='viewport' content='width=device-width, initial-scale=1.0' /><title>Confusion Matrix Chart</title><style>body{{margin:0;padding:0;}}</style></head><body><div id='confusion-container'></div><script src='data:text/javascript;base64,ZnVuY3Rpb24gX3R5cGVvZihuKXtyZXR1cm4gX3R5cGVvZj0iZnVuY3Rpb24iPT10eXBlb2YgU3ltYm9sJiYic3ltYm9sIj09dHlwZW9mIFN5bWJvbC5pdGVyYXRvcj9mdW5jdGlvbihuKXtyZXR1cm4gdHlwZW9mIG59OmZ1bmN0aW9uKG4pe3JldHVybiBuJiYiZnVuY3Rpb24iPT10eXBlb2YgU3ltYm9sJiZuLmNvbnN0cnVjdG9yPT09U3ltYm9sJiZuIT09U3ltYm9sLnByb3RvdHlwZT8ic3ltYm9sIjp0eXBlb2Ygbn0sX3R5cGVvZihuKX1mdW5jdGlvbiBfdG9Db25zdW1hYmxlQXJyYXkobil7cmV0dXJuIF9hcnJheVdpdGhvdXRIb2xlcyhuKXx8X2l0ZXJhYmxlVG9BcnJheShuKXx8X3Vuc3VwcG9ydGVkSXRlcmFibGVUb0FycmF5KG4pfHxfbm9uSXRlcmFibGVTcHJlYWQoKX1mdW5jdGlvbiBfbm9uSXRlcmFibGVTcHJlYWQoKXt0aHJvdyBuZXcgVHlwZUVycm9yKCJJbnZhbGlkIGF0dGVtcHQgdG8gc3ByZWFkIG5vbi1pdGVyYWJsZSBpbnN0YW5jZS5cbkluIG9yZGVyIHRvIGJlIGl0ZXJhYmxlLCBub24tYXJyYXkgb2JqZWN0cyBtdXN0IGhhdmUgYSBbU3ltYm9sLml0ZXJhdG9yXSgpIG1ldGhvZC4iKTt9ZnVuY3Rpb24gX2l0ZXJhYmxlVG9BcnJheShuKXtpZih0eXBlb2YgU3ltYm9sIT0idW5kZWZpbmVkIiYmbltTeW1ib2wuaXRlcmF0b3JdIT1udWxsfHxuWyJAQGl0ZXJhdG9yIl0hPW51bGwpcmV0dXJuIEFycmF5LmZyb20obil9ZnVuY3Rpb24gX2FycmF5V2l0aG91dEhvbGVzKG4pe2lmKEFycmF5LmlzQXJyYXkobikpcmV0dXJuIF9hcnJheUxpa2VUb0FycmF5KG4pfWZ1bmN0aW9uIF9jcmVhdGVGb3JPZkl0ZXJhdG9ySGVscGVyKG4sdCl7dmFyIGk9dHlwZW9mIFN5bWJvbCE9InVuZGVmaW5lZCImJm5bU3ltYm9sLml0ZXJhdG9yXXx8blsiQEBpdGVyYXRvciJdLHIsdSxmLGUsbztpZighaSl7aWYoQXJyYXkuaXNBcnJheShuKXx8KGk9X3Vuc3VwcG9ydGVkSXRlcmFibGVUb0FycmF5KG4pKXx8dCYmbiYmdHlwZW9mIG4ubGVuZ3RoPT0ibnVtYmVyIilyZXR1cm4gaSYmKG49aSkscj0wLHU9ZnVuY3Rpb24oKXt9LHtzOnUsbjpmdW5jdGlvbigpe3JldHVybiByPj1uLmxlbmd0aD97ZG9uZTohMH06e2RvbmU6ITEsdmFsdWU6bltyKytdfX0sZTpmdW5jdGlvbihuKXt0aHJvdyBuO30sZjp1fTt0aHJvdyBuZXcgVHlwZUVycm9yKCJJbnZhbGlkIGF0dGVtcHQgdG8gaXRlcmF0ZSBub24taXRlcmFibGUgaW5zdGFuY2UuXG5JbiBvcmRlciB0byBiZSBpdGVyYWJsZSwgbm9uLWFycmF5IG9iamVjdHMgbXVzdCBoYXZlIGEgW1N5bWJvbC5pdGVyYXRvcl0oKSBtZXRob2QuIik7fXJldHVybiBmPSEwLGU9ITEse3M6ZnVuY3Rpb24oKXtpPWkuY2FsbChuKX0sbjpmdW5jdGlvbigpe3ZhciBuPWkubmV4dCgpO3JldHVybiBmPW4uZG9uZSxufSxlOmZ1bmN0aW9uKG4pe2U9ITA7bz1ufSxmOmZ1bmN0aW9uKCl7dHJ5e2Z8fGkucmV0dXJuPT1udWxsfHxpLnJldHVybigpfWZpbmFsbHl7aWYoZSl0aHJvdyBvO319fX1mdW5jdGlvbiBfdW5zdXBwb3J0ZWRJdGVyYWJsZVRvQXJyYXkobix0KXtpZihuKXtpZih0eXBlb2Ygbj09InN0cmluZyIpcmV0dXJuIF9hcnJheUxpa2VUb0FycmF5KG4sdCk7dmFyIGk9T2JqZWN0LnByb3RvdHlwZS50b1N0cmluZy5jYWxsKG4pLnNsaWNlKDgsLTEpO3JldHVybihpPT09Ik9iamVjdCImJm4uY29uc3RydWN0b3ImJihpPW4uY29uc3RydWN0b3IubmFtZSksaT09PSJNYXAifHxpPT09IlNldCIpP0FycmF5LmZyb20obik6aT09PSJBcmd1bWVudHMifHwvXig/OlVpfEkpbnQoPzo4fDE2fDMyKSg/OkNsYW1wZWQpP0FycmF5JC8udGVzdChpKT9fYXJyYXlMaWtlVG9BcnJheShuLHQpOnZvaWQgMH19ZnVuY3Rpb24gX2FycmF5TGlrZVRvQXJyYXkobix0KXsodD09bnVsbHx8dD5uLmxlbmd0aCkmJih0PW4ubGVuZ3RoKTtmb3IodmFyIGk9MCxyPW5ldyBBcnJheSh0KTtpPHQ7aSsrKXJbaV09bltpXTtyZXR1cm4gcn1mdW5jdGlvbiBfY2xhc3NDYWxsQ2hlY2sobix0KXtpZighKG4gaW5zdGFuY2VvZiB0KSl0aHJvdyBuZXcgVHlwZUVycm9yKCJDYW5ub3QgY2FsbCBhIGNsYXNzIGFzIGEgZnVuY3Rpb24iKTt9ZnVuY3Rpb24gX2RlZmluZVByb3BlcnRpZXMobix0KXtmb3IodmFyIGkscj0wO3I8dC5sZW5ndGg7cisrKWk9dFtyXSxpLmVudW1lcmFibGU9aS5lbnVtZXJhYmxlfHwhMSxpLmNvbmZpZ3VyYWJsZT0hMCwidmFsdWUiaW4gaSYmKGkud3JpdGFibGU9ITApLE9iamVjdC5kZWZpbmVQcm9wZXJ0eShuLF90b1Byb3BlcnR5S2V5KGkua2V5KSxpKX1mdW5jdGlvbiBfY3JlYXRlQ2xhc3Mobix0LGkpe3JldHVybiB0JiZfZGVmaW5lUHJvcGVydGllcyhuLnByb3RvdHlwZSx0KSxpJiZfZGVmaW5lUHJvcGVydGllcyhuLGkpLE9iamVjdC5kZWZpbmVQcm9wZXJ0eShuLCJwcm90b3R5cGUiLHt3cml0YWJsZTohMX0pLG59ZnVuY3Rpb24gX3RvUHJvcGVydHlLZXkobil7dmFyIHQ9X
3RvUHJpbWl0aXZlKG4sInN0cmluZyIpO3JldHVybiJzeW1ib2wiPT1fdHlwZW9mKHQpP3Q6dCsiIn1mdW5jdGlvbiBfdG9QcmltaXRpdmUobix0KXt2YXIgaSxyO2lmKCJvYmplY3QiIT1fdHlwZW9mKG4pfHwhbilyZXR1cm4gbjtpZihpPW5bU3ltYm9sLnRvUHJpbWl0aXZlXSx2b2lkIDAhPT1pKXtpZihyPWkuY2FsbChuLHR8fCJkZWZhdWx0IiksIm9iamVjdCIhPV90eXBlb2YocikpcmV0dXJuIHI7dGhyb3cgbmV3IFR5cGVFcnJvcigiQEB0b1ByaW1pdGl2ZSBtdXN0IHJldHVybiBhIHByaW1pdGl2ZSB2YWx1ZS4iKTt9cmV0dXJuKCJzdHJpbmciPT09dD9TdHJpbmc6TnVtYmVyKShuKX1BcnJheS5wcm90b3R5cGUuZmxhdHx8KEFycmF5LnByb3RvdHlwZS5mbGF0PWZ1bmN0aW9uKCl7dmFyIG47aWYodGhpcz09bnVsbCl0aHJvdyBuZXcgRXJyb3IoInRoaXMgaXMgbnVsbCBvciBub3QgZGVmaW5lZCIpO3ZhciB0PU9iamVjdCh0aGlzKSxyPXQubGVuZ3RoPj4+MCxpPVtdO2ZvcihuPTA7bjxyO24rKylBcnJheS5pc0FycmF5KHRbbl0pP2kuY29uY2F0KHQuZmxhdCgpKTppLnB1c2godFtuXSk7cmV0dXJuIGl9KTt2YXIgQ29uZnVzaW9uTWF0cml4Q2hhcnQ9ZnVuY3Rpb24oKXtmdW5jdGlvbiBuKHQsaSxyKXt2YXIgZix1O19jbGFzc0NhbGxDaGVjayh0aGlzLG4pO3ZhciBzPXIubGVuZ3RoKzEsZT1NYXRoLm1pbih3aW5kb3cuaW5uZXJXaWR0aCx3aW5kb3cuaW5uZXJIZWlnaHQpLyhzKzEpLG89ZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgidGFibGUiKTtvLnN0eWxlLmJvcmRlckNvbGxhcHNlPSJjb2xsYXBzZSI7by5zdHlsZS50ZXh0QWxpZ249ImNlbnRlciI7by5zdHlsZS5tYXJnaW5MZWZ0PWUrInB4IjtvLnN0eWxlLm1hcmdpblRvcD1lKyJweCI7dC5hcHBlbmRDaGlsZChvKTtmPWRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoImRpdiIpO2YuaW5uZXJIVE1MPSJwcmVkaWN0ZWQgY2F0ZWdvcnkiO2Yuc3R5bGUucG9zaXRpb249ImFic29sdXRlIjtmLnN0eWxlLmZvbnRTaXplPSJ4LWxhcmdlIjtmLnN0eWxlLnRvcD0iMHB4IjtmLnN0eWxlLmxlZnQ9ZSoxLjUrInB4IjtmLnN0eWxlLmhlaWdodD1lKyJweCI7Zi5zdHlsZS5kaXNwbGF5PSJmbGV4IjtmLnN0eWxlLmFsaWduSXRlbXM9ImNlbnRlciI7Zi5zdHlsZS5tYXJnaW5MZWZ0PWUvMisicHgiO3QuYXBwZW5kQ2hpbGQoZik7dT1kb2N1bWVudC5jcmVhdGVFbGVtZW50KCJkaXYiKTt1LmlubmVySFRNTD0idHJ1ZSBjYXRlZ29yeSI7dS5zdHlsZS5wb3NpdGlvbj0iYWJzb2x1dGUiO3Uuc3R5bGUuZm9udFNpemU9IngtbGFyZ2UiO3Uuc3R5bGUudG9wPWUqMisicHgiO3Uuc3R5bGUubGVmdD0iMHB4Ijt1LnN0eWxlLnRyYW5zZm9ybT0idHJhbnNsYXRlKC01MCUpIHJvdGF0ZSgtOTBkZWcpIjt1LnN0eWxlLmhlaWdodD1lKyJweCI7dS5zdHlsZS5kaXNwbGF5PSJmbGV4Ijt1LnN0eWxlLmFsaWduSXRlbXM9ImNlbnRlciI7dS5zdHlsZS5tYXJnaW5MZWZ0PWUvMisicHgiO3QuYXBwZW5kQ2hpbGQodSk7dGhpcy5nbyhpLHMscixvLGUpfXJldHVybiBfY3JlYXRlQ2xhc3Mobixbe2tleToiZ28iLHZhbHVlOmZ1bmN0aW9uKG4sdCxpLHIsdSl7Zm9yKHZhciBjLGUsZixzLGE9ZnVuY3Rpb24obix0LGkpe2Zvcih2YXIgZSxzLG8sZix1PVtdLHI9MDtyPG47cisrKWZvcih1W3JdPVtdLGY9MDtmPG47ZisrKXVbcl1bZl09MDtlPV9jcmVhdGVGb3JPZkl0ZXJhdG9ySGVscGVyKHQpO3RyeXtmb3IoZS5zKCk7IShzPWUubigpKS5kb25lOylvPXMudmFsdWUsdVtpLmluZGV4T2Yoby50KSsxXVtpLmluZGV4T2Yoby5sKSsxXSsrfWNhdGNoKGgpe2UuZShoKX1maW5hbGx5e2UuZigpfWZvcihyPTE7cjxuO3IrKylmb3IoZj0xO2Y8bjtmKyspdVswXVtmXSs9dVtyXVtmXSx1W3JdWzBdKz11W3JdW2ZdO2ZvcihyPTE7cjxuO3IrKyl1WzBdW3JdLT11W3JdWzBdLHVbMF1bcl0+MCYmKHVbMF1bcl09IisiK3VbMF1bcl0pO3JldHVybiB1WzBdWzBdPSIiLHV9LHY9ZnVuY3Rpb24obix0LGkpe3JldHVybihpLW4pLyh0LW4pfSxoPWEodCxuLGkpLGw9aC5zbGljZSgxKS5tYXAoZnVuY3Rpb24obil7cmV0dXJuIG4uc2xpY2UoMSl9KS5mbGF0KCkseT1NYXRoLm1pbi5hcHBseShNYXRoLF90b0NvbnN1bWFibGVBcnJheShsKSkscD1NYXRoLm1heC5hcHBseShNYXRoLF90b0NvbnN1bWFibGVBcnJheShsKSksbz0wO288dDtvKyspZm9yKGM9ZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgidHIiKSxyLmFwcGVuZENoaWxkKGMpLGU9MDtlPHQ7ZSsrKXtpZihmPWRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoInRkIiksZi5zdHlsZS53aWR0aD11KyJweCIsZi5zdHlsZS5oZWlnaHQ9dSsicHgiLGYuc3R5bGUucGFkZGluZz0iMCIsbz09MCYmZT4wKXtmLmFwcGVuZENoaWxkKGRvY3VtZW50LmNyZWF0ZVRleHROb2RlKGlbZS0xXSkpO2Yuc3R5bGUub3ZlcmZsb3c9ImhpZGRlbiI7Zi5zdHlsZS52ZXJ0aWNhbEFsaWduPSJjZW50ZXIiO2Yuc3R5bGUudGV4dEFsaWduPSJjZW50ZXIiO3ZhciBzPTIqaFtvXVtlXS9oW2VdW29dLHc9cz49MD9zKjI1NTowLGI9czw9MD8tcyoyNTU6MDtmLnN0eWxlLmNvbG9yPSJyZ2IoIit3KyIsIitiKyIsMCkifWU9PTAmJm8+MCYmKGYuYXBwZW5kQ2hpbGQoZG9jdW1lbnQuY3JlYXRlVGV4dE5vZGUoaVtvLTFdKSksZi5zdHlsZS5vdmVyZmxvdz0iaGlkZGVuIixmLnN0eWxl
LnZlcnRpY2FsQWxpZ249ImNlbnRlciIsZi5zdHlsZS50ZXh0QWxpZ249ImNlbnRlciIpO2YuYXBwZW5kQ2hpbGQoZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgiYnIiKSk7Zi5hcHBlbmRDaGlsZChkb2N1bWVudC5jcmVhdGVUZXh0Tm9kZShoW29dW2VdKSk7bz4wJiZlPjAmJihzPXYoeSxwLGhbb11bZV0pLGYuc3R5bGUuYmFja2dyb3VuZENvbG9yPW89PWU/InJnYmEoMCwyNTUsMCwiK3MrIikiOiJyZ2JhKDI1NSwwLDAsIitzKyIpIik7Yy5hcHBlbmRDaGlsZChmKX19fV0pfSgpOw=='></script><script type='text/javascript'>var obj=JSON.parse('{ToJson()}');new ConfusionMatrixChart(document.getElementById('confusion-container'),obj.Samples,obj.Categories);</script></body></html>";
		}
	}
}
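
A hedged example of evaluating a classifier with the Confusion class; testSet, network and activation are assumptions carried over from the earlier sketches, and the digit category names are illustrative:

var confusion = new Confusion(new[] { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" });
foreach (var sample in testSet) // each sample is assumed to hold an input Matrix and a target Matrix
{
	uint predicted = network.PredictedIndex(sample.Input, activation);
	uint truth = network.TrueIndex(sample.Target);
	confusion.AddSample(truth.ToString(), predicted.ToString());
}
File.WriteAllText("confusion.html", confusion.GetHtmlPage()); // open in a browser to view the chart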
