From fc14381ad0ec82c0d469089718a85318936d2cac Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sat, 6 Dec 2025 18:55:48 +0200 Subject: [PATCH 01/23] ML-392 Added `Layer` and `Hidden` interfaces to define contracts for neural network layers. --- .../Layers/Base/Contracts/Hidden.php | 29 ++++++++++ src/NeuralNet/Layers/Base/Contracts/Layer.php | 57 +++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 src/NeuralNet/Layers/Base/Contracts/Hidden.php create mode 100644 src/NeuralNet/Layers/Base/Contracts/Layer.php diff --git a/src/NeuralNet/Layers/Base/Contracts/Hidden.php b/src/NeuralNet/Layers/Base/Contracts/Hidden.php new file mode 100644 index 000000000..b73b63521 --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Hidden.php @@ -0,0 +1,29 @@ + + */ +interface Hidden extends Layer +{ + /** + * Calculate the gradient and update the parameters of the layer. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred; +} diff --git a/src/NeuralNet/Layers/Base/Contracts/Layer.php b/src/NeuralNet/Layers/Base/Contracts/Layer.php new file mode 100644 index 000000000..10cf17b6e --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Layer.php @@ -0,0 +1,57 @@ + + */ +interface Layer extends Stringable +{ + /** + * The width of the layer. i.e. the number of neurons or computation nodes. + * + * @internal + * + * @return positive-int + */ + public function width() : int; + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int; + + /** + * Feed the input forward to the next layer in the network. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray; + + /** + * Forward pass during inference. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray; +} From 9546c9d84fce16f02efd3db0475352083eea8212 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sat, 6 Dec 2025 19:14:33 +0200 Subject: [PATCH 02/23] ML-393 Added standalone `Activation` layer implementation with comprehensive unit tests and fixed broken source file link in the documentation --- .../hidden-layers/activation.md | 8 +- .../Layers/Activation/Activation.php | 184 ++++++++++++++++++ .../Layers/Base/Contracts/Hidden.php | 1 - .../Layers/Activation/ActivationTest.php | 181 +++++++++++++++++ 4 files changed, 369 insertions(+), 5 deletions(-) create mode 100644 src/NeuralNet/Layers/Activation/Activation.php create mode 100644 tests/NeuralNet/Layers/Activation/ActivationTest.php diff --git a/docs/neural-network/hidden-layers/activation.md b/docs/neural-network/hidden-layers/activation.md index a4e4cde73..57d4dc46c 100644 --- a/docs/neural-network/hidden-layers/activation.md +++ b/docs/neural-network/hidden-layers/activation.md @@ -1,4 +1,4 @@ -[source] +[source] # Activation Activation layers apply a user-defined non-linear activation function to their inputs. They often work in conjunction with [Dense](dense.md) layers as a way to transform their output. 
@@ -10,8 +10,8 @@ Activation layers apply a user-defined non-linear activation function to their i ## Example ```php -use Rubix\ML\NeuralNet\Layers\Activation; -use Rubix\ML\NeuralNet\ActivationFunctions\ReLU; +use Rubix\ML\NeuralNet\Layers\Activation\Activation; +use Rubix\ML\NeuralNet\ActivationFunctions\ReLU\ReLU; $layer = new Activation(new ReLU()); -``` \ No newline at end of file +``` diff --git a/src/NeuralNet/Layers/Activation/Activation.php b/src/NeuralNet/Layers/Activation/Activation.php new file mode 100644 index 000000000..4394350b4 --- /dev/null +++ b/src/NeuralNet/Layers/Activation/Activation.php @@ -0,0 +1,184 @@ + + */ +class Activation implements Hidden +{ + /** + * The function that computes the output of the layer. + * + * @var ActivationFunction + */ + protected ActivationFunction $activationFn; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. + * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param ActivationFunction $activationFn + */ + public function __construct(ActivationFunction $activationFn) + { + $this->activationFn = $activationFn; + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $output = $this->activationFn->activate($input); + + $this->input = $input; + $this->output = $output; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->activationFn->activate($input); + } + + /** + * Calculate the gradient and update the parameters of the layer. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $input = $this->input; + $output = $this->output; + + $this->input = $this->output = null; + + return new Deferred( + [$this, 'gradient'], + [$input, $output, $prevGradient] + ); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray $output + * @param Deferred $prevGradient + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, Deferred $prevGradient) : NDArray + { + return NumPower::multiply( + $this->activationFn->differentiate($input), + $prevGradient() + ); + } + + /** + * Return the string representation of the object. 
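+     * Example: "Activation (activation fn: ReLU)".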
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Activation (activation fn: {$this->activationFn})"; + } +} diff --git a/src/NeuralNet/Layers/Base/Contracts/Hidden.php b/src/NeuralNet/Layers/Base/Contracts/Hidden.php index b73b63521..f903e3916 100644 --- a/src/NeuralNet/Layers/Base/Contracts/Hidden.php +++ b/src/NeuralNet/Layers/Base/Contracts/Hidden.php @@ -3,7 +3,6 @@ namespace Rubix\ML\NeuralNet\Layers\Base\Contracts; use Rubix\ML\Deferred; -use Rubix\ML\NeuralNet\Layers\Base\Contracts\Layer; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; /** diff --git a/tests/NeuralNet/Layers/Activation/ActivationTest.php b/tests/NeuralNet/Layers/Activation/ActivationTest.php new file mode 100644 index 000000000..2c203ad18 --- /dev/null +++ b/tests/NeuralNet/Layers/Activation/ActivationTest.php @@ -0,0 +1,181 @@ + + */ + public static function forwardProvider() : array + { + return [ + [ + NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]), + [ + [1.0, 2.5, 0.0], + [0.1, 0.0, 3.0], + [0.002, 0.0, 0.0], + ], + ], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + [ + NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]), + NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]), + [ + [0.25, 0.7, 0.0], + [0.5, 0.0, 0.01], + [0.25, 0, 0.0], + ], + ], + ]; + } + + protected function setUp() : void + { + $this->fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Activation(new ReLU()); + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Activation (activation fn: ReLU)', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes width equal to fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward activations')] + #[DataProvider('forwardProvider')] + public function testForward(NDArray $input, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward($input); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes backpropagated gradients after forward pass')] + #[DataProvider('backProvider')] + public function testBack(NDArray $input, NDArray $prevGrad, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + // Forward pass to set internal input/output state + $this->layer->forward($input); + + $gradient = $this->layer + ->back(prevGradient: new Deferred(fn: fn () => $prevGrad), optimizer: $this->optimizer) + ->compute(); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('forwardProvider')] + public function testInfer(NDArray $input, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($input); + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient correctly 
given input, output, and previous gradient')] + #[DataProvider('backProvider')] + public function testGradient(NDArray $input, NDArray $prevGrad, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + // Produce output to pass explicitly to gradient + $output = $this->layer->forward($input); + + $gradient = $this->layer->gradient( + $input, + $output, + new Deferred(fn: fn () => $prevGrad) + ); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } +} From 14a2b6f51d142a339a69fa9492a354138133611f Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 00:41:57 +0200 Subject: [PATCH 03/23] ML-394 Implemented `BatchNorm` layer with comprehensive unit tests and updated documentation with fixed source file link. Added `Parametric` interface to define parameterized layers. --- .../hidden-layers/batch-norm.md | 10 +- .../Layers/Base/Contracts/Parametric.php | 33 ++ src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 424 ++++++++++++++++++ .../Layers/BatchNorm/BatchNormTest.php | 103 +++++ 4 files changed, 565 insertions(+), 5 deletions(-) create mode 100644 src/NeuralNet/Layers/Base/Contracts/Parametric.php create mode 100644 src/NeuralNet/Layers/BatchNorm/BatchNorm.php create mode 100644 tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php diff --git a/docs/neural-network/hidden-layers/batch-norm.md b/docs/neural-network/hidden-layers/batch-norm.md index 99fdefd22..373113e14 100644 --- a/docs/neural-network/hidden-layers/batch-norm.md +++ b/docs/neural-network/hidden-layers/batch-norm.md @@ -1,4 +1,4 @@ -[source] +[source] # Batch Norm Batch Norm layers normalize the activations of the previous layer such that the mean activation is *close* to 0 and the standard deviation is *close* to 1. Adding Batch Norm reduces the amount of covariate shift within the network which makes it possible to use higher learning rates and thus converge faster under some circumstances. @@ -12,12 +12,12 @@ Batch Norm layers normalize the activations of the previous layer such that the ## Example ```php -use Rubix\ML\NeuralNet\Layers\BatchNorm; -use Rubix\ML\NeuralNet\Initializers\Constant; -use Rubix\ML\NeuralNet\Initializers\Normal; +use Rubix\ML\NeuralNet\Layers\BatchNorm\BatchNorm; +use Rubix\ML\NeuralNet\Initializers\Constant\Constant; +use Rubix\ML\NeuralNet\Initializers\Normal\Normal; $layer = new BatchNorm(0.7, new Constant(0.), new Normal(1.)); ``` ## References -[^1]: S. Ioffe et al. (2015). Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. \ No newline at end of file +[^1]: S. Ioffe et al. (2015). Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. diff --git a/src/NeuralNet/Layers/Base/Contracts/Parametric.php b/src/NeuralNet/Layers/Base/Contracts/Parametric.php new file mode 100644 index 000000000..ed772c85d --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Parametric.php @@ -0,0 +1,33 @@ + + */ +interface Parametric +{ + /** + * Return the parameters of the layer. + * + * @return Generator<\Rubix\ML\NeuralNet\Parameter> + */ + public function parameters() : Generator; + + /** + * Restore the parameters on the layer from an associative array. 
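+     * The array keys are expected to match the names yielded by parameters(),
+     * e.g. ['beta' => $beta, 'gamma' => $gamma] for a BatchNorm layer.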
+ * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void; +} diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php new file mode 100644 index 000000000..98c401f48 --- /dev/null +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -0,0 +1,424 @@ + 1.0) { + throw new InvalidArgumentException("Decay must be between 0 and 1, $decay given."); + } + + $this->decay = $decay; + $this->betaInitializer = $betaInitializer ?? new Constant(0.0); + $this->gammaInitializer = $gammaInitializer ?? new Constant(1.0); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + // Initialize beta and gamma as vectors of length fanOut + // We request a [fanOut, 1] NDArray and then flatten to 1-D + $betaMat = $this->betaInitializer->initialize(1, $fanOut); + $gammaMat = $this->gammaInitializer->initialize(1, $fanOut); + + $beta = NumPower::flatten($betaMat); + $gamma = NumPower::flatten($gammaMat); + + $this->beta = new Parameter($beta); + $this->gamma = new Parameter($gamma); + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + if (!$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + $rows = $input->shape()[0]; + $meanArr = []; + $varArr = []; + $stdInvArr = []; + + for ($i = 0; $i < $rows; $i++) { + $meanArr[$i] = NumPower::mean($input->toArray()[$i]); + $varArr[$i] = NumPower::variance($input->toArray()[$i]); + $stdInvArr[$i] = 1.0 / sqrt($varArr[$i]); + } + + $mean = NumPower::array($meanArr); + + $variance = NumPower::array($varArr); + $variance = NumPower::clip($variance, EPSILON, PHP_FLOAT_MAX); + + $stdInv = NumPower::array($stdInvArr); + + $xHat = NumPower::multiply( + NumPower::subtract(NumPower::transpose($input, [1, 0]), $mean), + $stdInv + ); + $xHat = NumPower::transpose($xHat, [1, 0]); + + // Initialize running stats if needed + if (!$this->mean or !$this->variance) { + $this->mean = $mean; + $this->variance = $variance; + } + + // Update running mean/variance: running = running*(1-decay) + current*decay + $this->mean = NumPower::add( + NumPower::multiply($this->mean, 1.0 - $this->decay), + NumPower::multiply($mean, $this->decay) + ); + + $this->variance = NumPower::add( + NumPower::multiply($this->variance, 1.0 - $this->decay), + NumPower::multiply($variance, $this->decay) + ); + + $this->stdInv = $stdInv; + $this->xHat = $xHat; + + // gamma * xHat + beta (per-column scale/shift) using NDArray ops + return NumPower::add(NumPower::multiply($xHat, $this->gamma->param()), $this->beta->param()); + } + + /** + * Compute an inferential pass through the layer. 
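+     * Normalizes the input using the running mean and variance accumulated
+     * during training rather than the statistics of the current batch.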
+ * + * @internal + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + if (!$this->mean or !$this->variance or !$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + $xHat = NumPower::divide( + NumPower::subtract($input, $this->mean), + NumPower::sqrt($this->variance) + ); + + + $return = NumPower::add( + NumPower::multiply( + $xHat, + $this->gamma->param() + ), + $this->beta->param() + ); + //pp("xxxxxxxxxxxxxxxxxxxxxxxxxx", $return->toArray()); + + return $return; + } + + /** + * Calculate the errors and gradients of the layer and update the parameters. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->stdInv or !$this->xHat) { + throw new RuntimeException('Must perform forward pass before' + . ' backpropagating.'); + } + + $dOut = $prevGradient(); +// pp('New dOut: ', $dOut->toArray()); + + $dBeta = NumPower::sum($dOut, 1); +// pp('New dBeta: ', $dBeta->toArray()); + + $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), 1); +// pp('New dGamma: ', $dGamma->toArray()); + + $gamma = $this->gamma->param(); + //pp('New Gamma: ', $gamma->toArray()); + + $this->beta->update($dBeta, $optimizer); + $this->gamma->update($dGamma, $optimizer); + + $stdInv = $this->stdInv; + $xHat = $this->xHat; + + $this->stdInv = $this->xHat = null; + + $return = new Deferred( + [$this, 'gradient'], + [$dOut, $gamma, $stdInv, $xHat] + ); + + //pp('New back: ', $dOut->toArray(), $gamma->toArray(), $stdInv->toArray(), $xHat->toArray(), end: "\n"); + + return $return; + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $dOut + * @param NDArray $gamma + * @param NDArray $stdInv + * @param NDArray $xHat + * @return NDArray + */ + public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHat) : NDArray + { + // Implement the same formula using PHP arrays + $dOutArr = $dOut->toArray(); + $gammaArr = $gamma->toArray(); // 1-D length n + $stdInvArr = $stdInv->toArray(); // 1-D length n + $xHatArr = $xHat->toArray(); // [m, n] + + $m = count($dOutArr); + $n = $m > 0 ? 
count($dOutArr[0]) : 0; + + // dXHat = dOut * gamma (per column) + $dXHatArr = []; + for ($i = 0; $i < $m; $i++) { + $row = []; + for ($j = 0; $j < $n; $j++) { + $row[] = $dOutArr[$i][$j] * $gammaArr[$j]; + } + $dXHatArr[] = $row; + } + + // xHatSigma = sum(dXHat * xHat) per column + $xHatSigma = array_fill(0, $n, 0.0); + $dXHatSigma = array_fill(0, $n, 0.0); + for ($j = 0; $j < $n; $j++) { + $sum1 = 0.0; + $sum2 = 0.0; + for ($i = 0; $i < $m; $i++) { + $sum1 += $dXHatArr[$i][$j] * $xHatArr[$i][$j]; + $sum2 += $dXHatArr[$i][$j]; + } + $xHatSigma[$j] = $sum1; + $dXHatSigma[$j] = $sum2; + } + + // Compute gradient for previous layer per formula: + // dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) + $dXArr = []; + for ($i = 0; $i < $m; $i++) { + $row = []; + for ($j = 0; $j < $n; $j++) { + $val = ($dXHatArr[$i][$j] * $m) + - $dXHatSigma[$j] + - ($xHatArr[$i][$j] * $xHatSigma[$j]); + $row[] = $val * ($stdInvArr[$j] / ($m ?: 1)); + } + $dXArr[] = $row; + } + + return NumPower::array($dXArr); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->beta or !$this->gamma) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'beta' => $this->beta; + yield 'gamma' => $this->gamma; + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->beta = $parameters['beta']; + $this->gamma = $parameters['gamma']; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Batch Norm (decay: {$this->decay}, beta initializer: {$this->betaInitializer}," + . 
" gamma initializer: {$this->gammaInitializer})"; + } +} diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php new file mode 100644 index 000000000..ad5fcdc07 --- /dev/null +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -0,0 +1,103 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new BatchNorm( + decay: 0.9, + betaInitializer: new Constant(0.0), + gammaInitializer: new Constant(1.0) + ); + } + + public function testInitializeForwardBackInfer() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + + $expected = [ + [-0.1251222, 1.2825030, -1.1573808], + [-0.6708631, -0.7427414, 1.4136046], + [0.7974157, -1.4101899, 0.6127743], + ]; + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $expected = [ + [-0.06445877134888621, 0.027271018647605647, 0.03718775270128047], + [0.11375900761901864, -0.10996704069838469, -0.0037919669206339162], + [-0.11909780311643131, -0.01087038130262698, 0.1299681844190583], + ]; + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + +// $expected = [ +// [-0.1260783, 1.2804902385302876, -1.1575619225761131], +// [-0.6718883801743488, -0.7438003494787433, 1.4135587296530918], +// [0.7956943312039361, -1.4105786650534555, 0.6111643338495193], +// ]; +// +// $infer = $this->layer->infer($this->input); +// +// self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); +// self::assertTrue(true); + } +} From 46e101b00b689352c8b919787fbd36ccd1d18d18 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 18:17:21 +0200 Subject: [PATCH 04/23] ML-395 Refactored `BatchNorm` layer to improve row/column normalization support --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 99 +++---- .../Layers/BatchNorm/BatchNormTest.php | 247 ++++++++++++++++-- 2 files changed, 258 insertions(+), 88 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 98c401f48..b7c170abb 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -105,6 +105,13 @@ class BatchNorm implements Hidden, Parametric */ protected ?NDArray $xHat = null; + /** + * Row-wise or column-wise normalization. 
+ * + * @var int + */ + protected const int COLUMN_WISE = 1; + /** * @param float $decay * @param Initializer|null $betaInitializer @@ -246,20 +253,21 @@ public function infer(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } + // Number of rows + $m = $input->shape()[0]; + $xHat = NumPower::divide( - NumPower::subtract($input, $this->mean), - NumPower::sqrt($this->variance) + NumPower::subtract($input, NumPower::reshape($this->mean, [$m, 1])), + NumPower::reshape(NumPower::sqrt($this->variance), [$m, 1]) ); - - $return = NumPower::add( + return NumPower::add( NumPower::multiply( $xHat, $this->gamma->param() ), $this->beta->param() ); - //pp("xxxxxxxxxxxxxxxxxxxxxxxxxx", $return->toArray()); return $return; } @@ -286,16 +294,9 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred } $dOut = $prevGradient(); -// pp('New dOut: ', $dOut->toArray()); - - $dBeta = NumPower::sum($dOut, 1); -// pp('New dBeta: ', $dBeta->toArray()); - - $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), 1); -// pp('New dGamma: ', $dGamma->toArray()); - + $dBeta = NumPower::sum($dOut, self::COLUMN_WISE); + $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), self::COLUMN_WISE); $gamma = $this->gamma->param(); - //pp('New Gamma: ', $gamma->toArray()); $this->beta->update($dBeta, $optimizer); $this->gamma->update($dGamma, $optimizer); @@ -305,13 +306,11 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred $this->stdInv = $this->xHat = null; - $return = new Deferred( + return new Deferred( [$this, 'gradient'], [$dOut, $gamma, $stdInv, $xHat] ); - //pp('New back: ', $dOut->toArray(), $gamma->toArray(), $stdInv->toArray(), $xHat->toArray(), end: "\n"); - return $return; } @@ -328,54 +327,26 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred */ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHat) : NDArray { - // Implement the same formula using PHP arrays - $dOutArr = $dOut->toArray(); - $gammaArr = $gamma->toArray(); // 1-D length n - $stdInvArr = $stdInv->toArray(); // 1-D length n - $xHatArr = $xHat->toArray(); // [m, n] - - $m = count($dOutArr); - $n = $m > 0 ? 
count($dOutArr[0]) : 0; - - // dXHat = dOut * gamma (per column) - $dXHatArr = []; - for ($i = 0; $i < $m; $i++) { - $row = []; - for ($j = 0; $j < $n; $j++) { - $row[] = $dOutArr[$i][$j] * $gammaArr[$j]; - } - $dXHatArr[] = $row; - } - - // xHatSigma = sum(dXHat * xHat) per column - $xHatSigma = array_fill(0, $n, 0.0); - $dXHatSigma = array_fill(0, $n, 0.0); - for ($j = 0; $j < $n; $j++) { - $sum1 = 0.0; - $sum2 = 0.0; - for ($i = 0; $i < $m; $i++) { - $sum1 += $dXHatArr[$i][$j] * $xHatArr[$i][$j]; - $sum2 += $dXHatArr[$i][$j]; - } - $xHatSigma[$j] = $sum1; - $dXHatSigma[$j] = $sum2; - } - - // Compute gradient for previous layer per formula: - // dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) - $dXArr = []; - for ($i = 0; $i < $m; $i++) { - $row = []; - for ($j = 0; $j < $n; $j++) { - $val = ($dXHatArr[$i][$j] * $m) - - $dXHatSigma[$j] - - ($xHatArr[$i][$j] * $xHatSigma[$j]); - $row[] = $val * ($stdInvArr[$j] / ($m ?: 1)); - } - $dXArr[] = $row; - } + $dXHat = NumPower::multiply($dOut, $gamma); + $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::COLUMN_WISE); + $dXHatSigma = NumPower::sum($dXHat, self::COLUMN_WISE); + + // Number of rows + $m = $dOut->shape()[0]; + + // Compute gradient per formula: dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) + return NumPower::multiply( + NumPower::subtract( + NumPower::subtract( + NumPower::multiply($dXHat, $m), + NumPower::reshape($dXHatSigma, [$m, 1]) + ), + NumPower::multiply($xHat, NumPower::reshape($xHatSigma, [$m, 1])) + ), + NumPower::reshape(NumPower::divide($stdInv, $m), [$m, 1]) + ); - return NumPower::array($dXArr); + return $return; } /** diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index ad5fcdc07..9f05ab47b 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -8,11 +8,17 @@ use PHPUnit\Framework\Attributes\Group; use NDArray; use NumPower; +use PHPUnit\Framework\Attributes\Test; +use PHPUnit\Framework\Attributes\TestDox; +use PHPUnit\Framework\Attributes\DataProvider; use Rubix\ML\Deferred; use Rubix\ML\NeuralNet\Layers\BatchNorm\BatchNorm; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; use Rubix\ML\NeuralNet\Optimizers\Stochastic\Stochastic; use Rubix\ML\NeuralNet\Initializers\Constant\Constant; +use Rubix\ML\NeuralNet\Parameters\Parameter as TrainableParameter; +use Rubix\ML\Exceptions\InvalidArgumentException; +use Rubix\ML\Exceptions\RuntimeException as RubixRuntimeException; use PHPUnit\Framework\TestCase; #[Group('Layers')] @@ -32,6 +38,83 @@ class BatchNormTest extends TestCase protected BatchNorm $layer; + /** + * @return array + */ + public static function initializeProvider() : array + { + return [ + 'fanIn=3' => [3], + ]; + } + + /** + * @return array + */ + public static function forwardProvider() : array + { + return [ + 'expectedForward' => [[ + [-0.1251222, 1.2825030, -1.1573808], + [-0.6708631, -0.7427414, 1.4136046], + [0.7974157, -1.4101899, 0.6127743], + ]], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + 'expectedGradient' => [[ + [-0.0644587, 0.0272710, 0.0371877], + [0.1137590, -0.1099670, -0.0037919], + [-0.1190978, -0.0108703, 0.1299681], + ]], + ]; + } + + /** + * @return array + */ + public static function inferProvider() : array + { + return [ + 'expectedInfer' => [[ + [-0.1251222, 1.2825031, -1.1573808], + [-0.6708631, -0.7427414, 1.4136046], + [0.7974158, 
-1.4101899, 0.6127743], + ]], + ]; + } + + /** + * @return array + */ + public static function gradientProvider() : array + { + return [ + 'expectedGradient' => [[ + [-0.0644587, 0.0272710, 0.0371877], + [0.1137590, -0.1099670, -0.0037919], + [-0.1190978, -0.0108703, 0.1299681], + ]], + ]; + } + + /** + * @return array + */ + public static function badDecayProvider() : array + { + return [ + 'negative' => [-0.01], + 'greaterThanOne' => [1.01], + ]; + } + protected function setUp() : void { $this->fanIn = 3; @@ -59,45 +142,161 @@ protected function setUp() : void ); } - public function testInitializeForwardBackInfer() : void + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void { - $this->layer->initialize($this->fanIn); + self::assertEquals( + 'Batch Norm (decay: 0.9, beta initializer: Constant (value: 0), gamma initializer: Constant (value: 1))', + (string) $this->layer + ); + } - self::assertEquals($this->fanIn, $this->layer->width()); + #[Test] + #[TestDox('Initializes width and returns fan out')] + #[DataProvider('initializeProvider')] + public function testInitialize(int $fanIn) : void + { + $fanOut = $this->layer->initialize($fanIn); + self::assertEquals($fanIn, $fanOut); + self::assertEquals($fanIn, $this->layer->width()); + } - $expected = [ - [-0.1251222, 1.2825030, -1.1573808], - [-0.6708631, -0.7427414, 1.4136046], - [0.7974157, -1.4101899, 0.6127743], - ]; + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize($this->fanIn); $forward = $this->layer->forward($this->input); self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('backProvider')] + public function testBack(array $expected) : void + { + $this->layer->initialize($this->fanIn); + $this->layer->forward($this->input); $gradient = $this->layer->back( prevGradient: $this->prevGrad, optimizer: $this->optimizer )->compute(); - $expected = [ - [-0.06445877134888621, 0.027271018647605647, 0.03718775270128047], - [0.11375900761901864, -0.10996704069838469, -0.0037919669206339162], - [-0.11909780311643131, -0.01087038130262698, 0.1299681844190583], - ]; - self::assertInstanceOf(NDArray::class, $gradient); self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Infers using running statistics')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize($this->fanIn); + // Perform a forward pass to set running mean/variance + $this->layer->forward($this->input); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Throws when width is requested before initialization')] + public function testWidthThrowsBeforeInitialize() : void + { + $layer = new BatchNorm(); + $this->expectException(RubixRuntimeException::class); + $layer->width(); + } + + #[Test] + #[TestDox('Constructor rejects invalid decay values')] + #[DataProvider('badDecayProvider')] + public function testConstructorRejectsInvalidDecay(float $decay) : void + { + $this->expectException(InvalidArgumentException::class); + new BatchNorm(decay: $decay); + } + + #[Test] + #[TestDox('Yields trainable parameters beta and gamma')] + public function testParameters() : void + { + $this->layer->initialize($this->fanIn); + + $params 
= iterator_to_array($this->layer->parameters()); -// $expected = [ -// [-0.1260783, 1.2804902385302876, -1.1575619225761131], -// [-0.6718883801743488, -0.7438003494787433, 1.4135587296530918], -// [0.7956943312039361, -1.4105786650534555, 0.6111643338495193], -// ]; -// -// $infer = $this->layer->infer($this->input); -// -// self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); -// self::assertTrue(true); + self::assertArrayHasKey('beta', $params); + self::assertArrayHasKey('gamma', $params); + self::assertInstanceOf(TrainableParameter::class, $params['beta']); + self::assertInstanceOf(TrainableParameter::class, $params['gamma']); + + self::assertEquals([0.0, 0.0, 0.0], $params['beta']->param()->toArray()); + self::assertEquals([1.0, 1.0, 1.0], $params['gamma']->param()->toArray()); + } + + #[Test] + #[TestDox('Restores parameters from array')] + public function testRestore() : void + { + $this->layer->initialize($this->fanIn); + + $betaNew = new TrainableParameter(NumPower::full([3], 2.0)); + $gammaNew = new TrainableParameter(NumPower::full([3], 3.0)); + + $this->layer->restore([ + 'beta' => $betaNew, + 'gamma' => $gammaNew, + ]); + + $restored = iterator_to_array($this->layer->parameters()); + self::assertSame($betaNew, $restored['beta']); + self::assertSame($gammaNew, $restored['gamma']); + self::assertEquals([2.0, 2.0, 2.0], $restored['beta']->param()->toArray()); + self::assertEquals([3.0, 3.0, 3.0], $restored['gamma']->param()->toArray()); + } + + #[Test] + #[TestDox('Computes gradient for previous layer directly')] + #[DataProvider('gradientProvider')] + public function testGradient(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + // Compute forward-time caches manually to pass into gradient() + $input = $this->input; + $rows = $input->shape()[0]; + $meanArr = []; + $varArr = []; + $stdInvArr = []; + + for ($i = 0; $i < $rows; $i++) { + $row = $input->toArray()[$i]; + $meanArr[$i] = NumPower::mean($row); + $varArr[$i] = NumPower::variance($row); + $stdInvArr[$i] = 1.0 / sqrt($varArr[$i]); + } + + $mean = NumPower::array($meanArr); + $stdInv = NumPower::array($stdInvArr); + + $xHat = NumPower::multiply( + NumPower::subtract(NumPower::transpose($input, [1, 0]), $mean), + $stdInv + ); + $xHat = NumPower::transpose($xHat, [1, 0]); + + // Use provided prevGrad as dOut and current gamma parameter + $dOut = ($this->prevGrad)(); + $gamma = iterator_to_array($this->layer->parameters())['gamma']->param(); + + $gradient = $this->layer->gradient($dOut, $gamma, $stdInv, $xHat); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); } } From a44e86a9d4aa2f7e098861a094bf2b13d47d3e21 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:01:46 +0200 Subject: [PATCH 05/23] ML-396 Refactored `BatchNorm` layer to optimize normalization logic with `NumPower` utilities --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 34 +++++++++---------- .../Layers/BatchNorm/BatchNormTest.php | 4 +-- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index b7c170abb..1e29a81ec 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -190,29 +190,27 @@ public function forward(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } - $rows = $input->shape()[0]; - $meanArr = []; - $varArr = []; - $stdInvArr = []; - - for ($i = 0; $i < $rows; $i++) { - 
$meanArr[$i] = NumPower::mean($input->toArray()[$i]); - $varArr[$i] = NumPower::variance($input->toArray()[$i]); - $stdInvArr[$i] = 1.0 / sqrt($varArr[$i]); - } + // Shape: [m, n] + [$m, $n] = $input->shape(); + + // Row-wise mean across features (axis 1), length m + $sum = NumPower::sum($input, 1); + $mean = NumPower::divide($sum, $n); - $mean = NumPower::array($meanArr); + // Center the input: broadcast mean to [m, n] + $centered = NumPower::subtract($input, NumPower::reshape($mean, [$m, 1])); - $variance = NumPower::array($varArr); + // Row-wise variance across features (axis 1) + $centeredSq = NumPower::multiply($centered, $centered); + $varSum = NumPower::sum($centeredSq, 1); + $variance = NumPower::divide($varSum, $n); $variance = NumPower::clip($variance, EPSILON, PHP_FLOAT_MAX); - $stdInv = NumPower::array($stdInvArr); + // Inverse std from clipped variance + $stdInv = NumPower::reciprocal(NumPower::sqrt($variance)); - $xHat = NumPower::multiply( - NumPower::subtract(NumPower::transpose($input, [1, 0]), $mean), - $stdInv - ); - $xHat = NumPower::transpose($xHat, [1, 0]); + // Normalize: (x - mean) * stdInv + $xHat = NumPower::multiply($centered, NumPower::reshape($stdInv, [$m, 1])); // Initialize running stats if needed if (!$this->mean or !$this->variance) { diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index 9f05ab47b..4912857e3 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -55,9 +55,9 @@ public static function forwardProvider() : array { return [ 'expectedForward' => [[ - [-0.1251222, 1.2825030, -1.1573808], + [-0.1251222, 1.2825031, -1.1573808], [-0.6708631, -0.7427414, 1.4136046], - [0.7974157, -1.4101899, 0.6127743], + [0.7974158, -1.4101899, 0.6127743], ]], ]; } From 5a3f5a718f2742ca9f7adcd66e6d5edfbbf5f405 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:10:52 +0200 Subject: [PATCH 06/23] ML-397 Enhanced `BatchNorm` layer with improved axis constants, numerical stability during inference, and gradient computation logic --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 28 +++++++++----------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 1e29a81ec..7b786b1f0 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -110,7 +110,8 @@ class BatchNorm implements Hidden, Parametric * * @var int */ - protected const int COLUMN_WISE = 1; + protected const int AXIS_SAMPLES = 0; + protected const int AXIS_FEATURES = 1; /** * @param float $decay @@ -218,7 +219,8 @@ public function forward(NDArray $input) : NDArray $this->variance = $variance; } - // Update running mean/variance: running = running*(1-decay) + current*decay + // Update running mean/variance using exponential moving average (EMA) + // Convention: running = running*(1 - decay) + current*decay $this->mean = NumPower::add( NumPower::multiply($this->mean, 1.0 - $this->decay), NumPower::multiply($mean, $this->decay) @@ -254,9 +256,11 @@ public function infer(NDArray $input) : NDArray // Number of rows $m = $input->shape()[0]; + // Use clipped variance for numerical stability during inference + $varianceClipped = NumPower::clip($this->variance, EPSILON, PHP_FLOAT_MAX); $xHat = NumPower::divide( NumPower::subtract($input, NumPower::reshape($this->mean, [$m, 1])), - 
NumPower::reshape(NumPower::sqrt($this->variance), [$m, 1]) + NumPower::reshape(NumPower::sqrt($varianceClipped), [$m, 1]) ); return NumPower::add( @@ -266,8 +270,6 @@ public function infer(NDArray $input) : NDArray ), $this->beta->param() ); - - return $return; } /** @@ -287,13 +289,13 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred } if (!$this->stdInv or !$this->xHat) { - throw new RuntimeException('Must perform forward pass before' - . ' backpropagating.'); + throw new RuntimeException('Must perform forward pass before backpropagating.'); } $dOut = $prevGradient(); - $dBeta = NumPower::sum($dOut, self::COLUMN_WISE); - $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), self::COLUMN_WISE); + // Sum across samples (axis 0) for parameter gradients + $dBeta = NumPower::sum($dOut, self::AXIS_SAMPLES); + $dGamma = NumPower::sum(NumPower::multiply($dOut, $this->xHat), self::AXIS_SAMPLES); $gamma = $this->gamma->param(); $this->beta->update($dBeta, $optimizer); @@ -308,8 +310,6 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred [$this, 'gradient'], [$dOut, $gamma, $stdInv, $xHat] ); - - return $return; } /** @@ -326,8 +326,8 @@ public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHat) : NDArray { $dXHat = NumPower::multiply($dOut, $gamma); - $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::COLUMN_WISE); - $dXHatSigma = NumPower::sum($dXHat, self::COLUMN_WISE); + $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::AXIS_FEATURES); + $dXHatSigma = NumPower::sum($dXHat, self::AXIS_FEATURES); // Number of rows $m = $dOut->shape()[0]; @@ -343,8 +343,6 @@ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray ), NumPower::reshape(NumPower::divide($stdInv, $m), [$m, 1]) ); - - return $return; } /** From ec3c2362ebc7c20511944b7eeb76b491e7d8e602 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:16:16 +0200 Subject: [PATCH 07/23] ML-397 Enhanced `BatchNorm` layer with improved axis constants, numerical stability during inference, and gradient computation logic --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 7b786b1f0..40eafbf3a 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -190,8 +190,7 @@ public function forward(NDArray $input) : NDArray if (!$this->beta or !$this->gamma) { throw new RuntimeException('Layer has not been initialized.'); } - - // Shape: [m, n] + [$m, $n] = $input->shape(); // Row-wise mean across features (axis 1), length m @@ -253,8 +252,7 @@ public function infer(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } - // Number of rows - $m = $input->shape()[0]; + [$m, $n] = $input->shape(); // Use clipped variance for numerical stability during inference $varianceClipped = NumPower::clip($this->variance, EPSILON, PHP_FLOAT_MAX); @@ -329,8 +327,7 @@ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::AXIS_FEATURES); $dXHatSigma = NumPower::sum($dXHat, self::AXIS_FEATURES); - // Number of rows - $m = $dOut->shape()[0]; + [$m, $n] = $dOut->shape(); // Compute 
gradient per formula: dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) return NumPower::multiply( From 00955f981ec72df1d5fc2580a95bbd481131b3c3 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:43:32 +0200 Subject: [PATCH 08/23] ML-398 Improved `BatchNorm` behavior for varying batch sizes with additional tests and updated shape handling --- src/NeuralNet/Layers/BatchNorm/BatchNorm.php | 7 +- .../Layers/BatchNorm/BatchNormTest.php | 90 +++++++++++++++++++ 2 files changed, 94 insertions(+), 3 deletions(-) diff --git a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php index 40eafbf3a..a15b1fac5 100644 --- a/src/NeuralNet/Layers/BatchNorm/BatchNorm.php +++ b/src/NeuralNet/Layers/BatchNorm/BatchNorm.php @@ -32,6 +32,7 @@ * @category Machine Learning * @package Rubix/ML * @author Andrew DalPino + * @author Samuel Akopyan */ class BatchNorm implements Hidden, Parametric { @@ -190,7 +191,7 @@ public function forward(NDArray $input) : NDArray if (!$this->beta or !$this->gamma) { throw new RuntimeException('Layer has not been initialized.'); } - + [$m, $n] = $input->shape(); // Row-wise mean across features (axis 1), length m @@ -252,7 +253,7 @@ public function infer(NDArray $input) : NDArray throw new RuntimeException('Layer has not been initialized.'); } - [$m, $n] = $input->shape(); + $m = $input->shape()[0]; // Use clipped variance for numerical stability during inference $varianceClipped = NumPower::clip($this->variance, EPSILON, PHP_FLOAT_MAX); @@ -327,7 +328,7 @@ public function gradient(NDArray $dOut, NDArray $gamma, NDArray $stdInv, NDArray $xHatSigma = NumPower::sum(NumPower::multiply($dXHat, $xHat), self::AXIS_FEATURES); $dXHatSigma = NumPower::sum($dXHat, self::AXIS_FEATURES); - [$m, $n] = $dOut->shape(); + $m = $dOut->shape()[0]; // Compute gradient per formula: dX = (dXHat * m - dXHatSigma - xHat * xHatSigma) * (stdInv / m) return NumPower::multiply( diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index 4912857e3..e926782f8 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -90,6 +90,30 @@ public static function inferProvider() : array ]; } + /** + * Additional inputs to validate behavior across different batch sizes. 
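+     * Covers a single-sample batch, a batch containing a constant (zero-variance)
+     * row, and a larger 4x3 batch.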
+ * + * @return array + */ + public static function batchInputsProvider() : array + { + return [ + 'batch1x3' => [[ + [2.0, -1.0, 0.0], + ]], + 'batch2x3' => [[ + [1.0, 2.0, 3.0], + [3.0, 3.0, 3.0], + ]], + 'batch4x3' => [[ + [0.5, -0.5, 1.5], + [10.0, -10.0, 0.0], + [7.2, 3.3, -2.4], + [-1.0, -2.0, 4.0], + ]], + ]; + } + /** * @return array */ @@ -205,6 +229,36 @@ public function testInfer(array $expected) : void self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); } + #[Test] + #[TestDox('Computes forward pass (row-wise) with zero mean and unit variance per sample for various batch sizes')] + #[DataProvider('batchInputsProvider')] + public function testForwardStatsMultipleBatches(array $input) : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward(NumPower::array($input)); + $out = $forward->toArray(); + + // Check per-row mean ~ 0 and variance ~ 1 (allow 0 for degenerate rows) + $this->assertRowwiseStats($input, $out, true); + } + + #[Test] + #[TestDox('Infers (row-wise) with zero mean and unit variance per sample for various batch sizes')] + #[DataProvider('batchInputsProvider')] + public function testInferStatsMultipleBatches(array $input) : void + { + $this->layer->initialize($this->fanIn); + + // Perform a forward pass on the same input to initialize running stats + $this->layer->forward(NumPower::array($input)); + + $infer = $this->layer->infer(NumPower::array($input)); + $out = $infer->toArray(); + + $this->assertRowwiseStats($input, $out, false); + } + #[Test] #[TestDox('Throws when width is requested before initialization')] public function testWidthThrowsBeforeInitialize() : void @@ -299,4 +353,40 @@ public function testGradient(array $expected) : void self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); } + + /** + * @param array> $inputRows + * @param array> $outRows + */ + private function assertRowwiseStats(array $inputRows, array $outRows, bool $checkMean) : void + { + foreach ($outRows as $i => $row) { + $mean = array_sum($row) / count($row); + $var = 0.0; + foreach ($row as $v) { + $var += ($v - $mean) * ($v - $mean); + } + $var /= count($row); + + $orig = $inputRows[$i]; + $origMean = array_sum($orig) / count($orig); + $origVar = 0.0; + foreach ($orig as $ov) { + $origVar += ($ov - $origMean) * ($ov - $origMean); + } + $origVar /= count($orig); + + $expectedVar = $origVar < 1e-12 ? 
0.0 : 1.0; + + if ($checkMean) { + self::assertEqualsWithDelta(0.0, $mean, 1e-7); + } + + if ($expectedVar === 0.0) { + self::assertLessThan(5e-3, $var); + } else { + self::assertEqualsWithDelta(1.0, $var, 1e-6); + } + } + } } From 3fb79a919b06941e45dbf79a3c4f02ae2f4cc5c6 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 19:46:00 +0200 Subject: [PATCH 09/23] ML-398 Improved `BatchNorm` behavior for varying batch sizes with additional tests and updated shape handling --- tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php index e926782f8..dd5380941 100644 --- a/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php +++ b/tests/NeuralNet/Layers/BatchNorm/BatchNormTest.php @@ -11,6 +11,7 @@ use PHPUnit\Framework\Attributes\Test; use PHPUnit\Framework\Attributes\TestDox; use PHPUnit\Framework\Attributes\DataProvider; +use PHPUnit\Framework\MockObject\Rule\Parameters; use Rubix\ML\Deferred; use Rubix\ML\NeuralNet\Layers\BatchNorm\BatchNorm; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; @@ -383,7 +384,7 @@ private function assertRowwiseStats(array $inputRows, array $outRows, bool $chec } if ($expectedVar === 0.0) { - self::assertLessThan(5e-3, $var); + self::assertLessThan(1e-6, $var); } else { self::assertEqualsWithDelta(1.0, $var, 1e-6); } From b4e507d18aab5b8580ca17bc446fa11fa19679d7 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 22:53:33 +0200 Subject: [PATCH 10/23] ML-399 Added `Binary` output layer and comprehensive unit tests with interface definition for output layers. --- .../Layers/Base/Contracts/Output.php | 29 +++ src/NeuralNet/Layers/Binary/Binary.php | 222 ++++++++++++++++++ tests/NeuralNet/Layers/Binary/BinaryTest.php | 192 +++++++++++++++ 3 files changed, 443 insertions(+) create mode 100644 src/NeuralNet/Layers/Base/Contracts/Output.php create mode 100644 src/NeuralNet/Layers/Binary/Binary.php create mode 100644 tests/NeuralNet/Layers/Binary/BinaryTest.php diff --git a/src/NeuralNet/Layers/Base/Contracts/Output.php b/src/NeuralNet/Layers/Base/Contracts/Output.php new file mode 100644 index 000000000..49e11bb4b --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Output.php @@ -0,0 +1,29 @@ + + */ +interface Output extends Layer +{ + /** + * Compute the gradient and loss at the output. + * + * @param (string|int|float)[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return mixed[] + */ + public function back(array $labels, Optimizer $optimizer) : array; +} diff --git a/src/NeuralNet/Layers/Binary/Binary.php b/src/NeuralNet/Layers/Binary/Binary.php new file mode 100644 index 000000000..37b6f145b --- /dev/null +++ b/src/NeuralNet/Layers/Binary/Binary.php @@ -0,0 +1,222 @@ + + */ +class Binary implements Output +{ + /** + * The labels of either of the possible outcomes. + * + * @var float[] + */ + protected array $classes = [ + // + ]; + + /** + * The function that computes the loss of erroneous activations. + * + * @var ClassificationLoss + */ + protected ClassificationLoss $costFn; + + /** + * The sigmoid activation function. + * + * @var Sigmoid + */ + protected Sigmoid $sigmoid; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. 
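+     * Cached by forward() so that back() can compute the loss and gradient
+     * without re-activating the input.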
+ * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param string[] $classes + * @param ClassificationLoss|null $costFn + * @throws InvalidArgumentException + */ + public function __construct(array $classes, ?ClassificationLoss $costFn = null) + { + $classes = array_values(array_unique($classes)); + + if (count($classes) !== 2) { + throw new InvalidArgumentException('Number of classes must be 2, ' . count($classes) . ' given.'); + } + + $classes = [ + $classes[0] => 0.0, + $classes[1] => 1.0, + ]; + + $this->classes = $classes; + $this->costFn = $costFn ?? new CrossEntropy(); + $this->sigmoid = new Sigmoid(); + } + + /** + * Return the width of the layer. + * + * @return positive-int + */ + public function width() : int + { + return 1; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @throws InvalidArgumentException + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + if ($fanIn !== 1) { + throw new InvalidArgumentException("Fan in must be equal to 1, $fanIn given."); + } + + return 1; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $output = $this->sigmoid->activate($input); + + $this->input = $input; + $this->output = $output; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->sigmoid->activate($input); + } + + /** + * Compute the gradient and loss at the output. + * + * @param string[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return (Deferred|float)[] + */ + public function back(array $labels, Optimizer $optimizer) : array + { + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $expected = []; + + foreach ($labels as $label) { + $expected[] = $this->classes[$label]; + } + + $expected = NumPower::array([$expected]); + + $input = $this->input; + $output = $this->output; + + $gradient = new Deferred([$this, 'gradient'], [$input, $output, $expected]); + + $loss = $this->costFn->compute($output, $expected); + + $this->input = $this->output = null; + + return [$gradient, $loss]; + } + + /** + * Calculate the gradient for the previous layer. + * + * @param NDArray $input + * @param NDArray $output + * @param NDArray $expected + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, NDArray $expected) : NDArray + { + $n = $output->shape()[1]; + + if ($this->costFn instanceof CrossEntropy) { + return NumPower::divide( + NumPower::subtract($output, $expected), + $n + ); + } + + $dLoss = NumPower::divide( + $this->costFn->differentiate($output, $expected), + $n + ); + + return NumPower::multiply( + $this->sigmoid->differentiate($output), + $dLoss + ); + } + + /** + * Return the string representation of the object. 
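+     * Example: "Binary (cost function: Cross Entropy)".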
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Binary (cost function: {$this->costFn})"; + } +} diff --git a/tests/NeuralNet/Layers/Binary/BinaryTest.php b/tests/NeuralNet/Layers/Binary/BinaryTest.php new file mode 100644 index 000000000..645d7c86b --- /dev/null +++ b/tests/NeuralNet/Layers/Binary/BinaryTest.php @@ -0,0 +1,192 @@ + + */ + public static function forwardProvider() : array + { + return [ + [ + [ + [0.7310585, 0.9241418, 0.4750207], + ], + ], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + [ + [ + [0.2436861, -0.0252860, 0.1583402], + ], + ], + ]; + } + + /** + * @return array}> + */ + public static function badClassesProvider() : array + { + return [ + 'empty' => [[]], + 'single' => [['hot']], + 'duplicatesToOne' => [['hot', 'hot']], + 'threeUnique' => [['hot', 'cold', 'warm']], + ]; + } + + protected function setUp() : void + { + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + ]); + + $this->labels = ['hot', 'cold', 'hot']; + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Binary(classes: ['hot', 'cold'], costFn: new CrossEntropy()); + } + + #[Test] + #[TestDox('Returns string representation')] + public function testToString() : void + { + $this->layer->initialize(1); + + self::assertEquals('Binary (cost function: Cross Entropy)', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes and reports width')] + public function testInitializeWidth() : void + { + $this->layer->initialize(1); + self::assertEquals(1, $this->layer->width()); + } + + #[Test] + #[TestDox('Constructor rejects invalid classes arrays')] + #[DataProvider('badClassesProvider')] + public function testConstructorRejectsInvalidClasses(array $classes) : void + { + $this->expectException(InvalidArgumentException::class); + new Binary(classes: $classes, costFn: new CrossEntropy()); + } + + #[Test] + #[TestDox('Constructor accepts classes arrays that dedupe to exactly 2 labels')] + public function testConstructorAcceptsDuplicateClassesThatDedupeToTwo() : void + { + $layer = new Binary(classes: ['hot', 'cold', 'hot'], costFn: new CrossEntropy()); + // Should initialize without throwing and report correct width + $layer->initialize(1); + self::assertEquals(1, $layer->width()); + } + + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize(1); + + $forward = $this->layer->forward($this->input); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('backProvider')] + public function testBack(array $expectedGradient) : void + { + $this->layer->initialize(1); + $this->layer->forward($this->input); + + [$computation, $loss] = $this->layer->back(labels: $this->labels, optimizer: $this->optimizer); + + self::assertInstanceOf(Deferred::class, $computation); + self::assertIsFloat($loss); + + $gradient = $computation->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient directly given input, output, expected, and batch size')] + #[DataProvider('backProvider')] + public function testGradient(array $expectedGradient) : void + { + $this->layer->initialize(1); + + $input = $this->input; + $output = 
$this->layer->forward($input); + + // Build expected NDArray (1, batch) using the Binary classes mapping: hot=>0.0, cold=>1.0 + $expected = []; + foreach ($this->labels as $label) { + $expected[] = ($label === 'cold') ? 1.0 : 0.0; + } + $expected = NumPower::array([$expected]); + + $batchSize = count($this->labels); + + $gradient = $this->layer->gradient($input, $output, $expected, $batchSize); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('forwardProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize(1); + + $infer = $this->layer->infer($this->input); + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From d5f7c5778fb4cf572b065a4a8dc61db0213082ec Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 7 Dec 2025 23:18:14 +0200 Subject: [PATCH 11/23] ML-400 Added `Continuous` output layer with complete interface implementation and unit tests --- .../Layers/Continuous/Continuous.php | 157 +++++++++++++++++ .../Layers/Continuous/ContinuousTest.php | 159 ++++++++++++++++++ 2 files changed, 316 insertions(+) create mode 100644 src/NeuralNet/Layers/Continuous/Continuous.php create mode 100644 tests/NeuralNet/Layers/Continuous/ContinuousTest.php diff --git a/src/NeuralNet/Layers/Continuous/Continuous.php b/src/NeuralNet/Layers/Continuous/Continuous.php new file mode 100644 index 000000000..7a07e9735 --- /dev/null +++ b/src/NeuralNet/Layers/Continuous/Continuous.php @@ -0,0 +1,157 @@ + + */ +class Continuous implements Output +{ + /** + * The function that computes the loss of erroneous activations. + * + * @var RegressionLoss + */ + protected RegressionLoss $costFn; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * @param RegressionLoss|null $costFn + */ + public function __construct(?RegressionLoss $costFn = null) + { + $this->costFn = $costFn ?? new LeastSquares(); + } + + /** + * Return the width of the layer. + * + * @return positive-int + */ + public function width() : int + { + return 1; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @throws InvalidArgumentException + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + if ($fanIn !== 1) { + throw new InvalidArgumentException("Fan in must be equal to 1, $fanIn given."); + } + + return 1; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $this->input = $input; + + return $input; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $input; + } + + /** + * Compute the gradient and loss at the output. 
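+ *
+ * Returns a tuple of [deferred gradient, loss]: the deferred computation
+ * evaluates the cost function derivative with respect to the memorized
+ * input, divided by the batch size, while the loss is computed from that
+ * same input against the given labels.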
+ * + * @param (int|float)[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return (Deferred|float)[] + */ + public function back(array $labels, Optimizer $optimizer) : array + { + if (!$this->input) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $expected = NumPower::array([$labels]); + + $input = $this->input; + + $gradient = new Deferred([$this, 'gradient'], [$input, $expected]); + + $loss = $this->costFn->compute($input, $expected); + + $this->input = null; + + return [$gradient, $loss]; + } + + /** + * Calculate the gradient for the previous layer. + * + * @param NDArray $input + * @param NDArray $expected + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $expected) : NDArray + { + $n = $input->shape()[1]; + + return NumPower::divide( + $this->costFn->differentiate($input, $expected), + $n + ); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Continuous (cost function: {$this->costFn})"; + } +} diff --git a/tests/NeuralNet/Layers/Continuous/ContinuousTest.php b/tests/NeuralNet/Layers/Continuous/ContinuousTest.php new file mode 100644 index 000000000..39592cdcb --- /dev/null +++ b/tests/NeuralNet/Layers/Continuous/ContinuousTest.php @@ -0,0 +1,159 @@ + + */ + public static function forwardProvider() : array + { + return [ + [ + [ + [2.5, 0.0, -6.0], + ], + ], + ]; + } + + /** + * @return array + */ + public static function gradientProvider() : array + { + return [ + [ + [ + [0.8333333, 0.8333333, -32.0], + ], + ], + ]; + } + + protected function setUp() : void + { + $this->input = NumPower::array([ + [2.5, 0.0, -6.0], + ]); + + $this->labels = [0.0, -2.5, 90.0]; + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Continuous(new LeastSquares()); + } + + #[Test] + #[TestDox('Returns string representation')] + public function testToString() : void + { + $this->layer->initialize(1); + + self::assertEquals('Continuous (cost function: Least Squares)', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes and reports width')] + public function testInitializeWidth() : void + { + $this->layer->initialize(1); + self::assertEquals(1, $this->layer->width()); + } + + #[Test] + #[TestDox('Initialize rejects fan-in not equal to 1')] + public function testInitializeRejectsInvalidFanIn() : void + { + $this->expectException(InvalidArgumentException::class); + $this->layer->initialize(2); + } + + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize(1); + + $forward = $this->layer->forward($this->input); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('gradientProvider')] + public function testBack(array $expectedGradient) : void + { + $this->layer->initialize(1); + $this->layer->forward($this->input); + + [$computation, $loss] = $this->layer->back(labels: $this->labels, optimizer: $this->optimizer); + + self::assertInstanceOf(Deferred::class, $computation); + self::assertIsFloat($loss); + + $gradient = $computation->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient directly given input and 
expected')] + #[DataProvider('gradientProvider')] + public function testGradient(array $expectedGradient) : void + { + $this->layer->initialize(1); + + $input = $this->input; + $expected = NumPower::array([$this->labels]); + + $gradient = $this->layer->gradient($input, $expected); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('forwardProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize(1); + + $infer = $this->layer->infer($this->input); + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From cca97f96faaa213ba8f0d1b7a9e119c0925ad762 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Wed, 10 Dec 2025 20:23:52 +0200 Subject: [PATCH 12/23] ML-392 Added `Dense` hidden layer implementation with complete forward/backward passes --- docs/neural-network/hidden-layers/dense.md | 6 +- src/NeuralNet/Layers/Dense/Dense.php | 348 +++++++++++++++++++++ tests/NeuralNet/Layers/Dense/DenseTest.php | 308 ++++++++++++++++++ 3 files changed, 659 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Dense/Dense.php create mode 100644 tests/NeuralNet/Layers/Dense/DenseTest.php diff --git a/docs/neural-network/hidden-layers/dense.md b/docs/neural-network/hidden-layers/dense.md index cf4a7bd4c..db382d0a0 100644 --- a/docs/neural-network/hidden-layers/dense.md +++ b/docs/neural-network/hidden-layers/dense.md @@ -1,4 +1,4 @@ -[source] +[source] # Dense Dense (or *fully connected*) hidden layers are layers of neurons that connect to each node in the previous layer by a parameterized synapse. They perform a linear transformation on their input and are usually followed by an [Activation](activation.md) layer. The majority of the trainable parameters in a standard feed forward neural network are contained within Dense hidden layers. @@ -14,9 +14,9 @@ Dense (or *fully connected*) hidden layers are layers of neurons that connect to ## Example ```php -use Rubix\ML\NeuralNet\Layers\Dense; +use Rubix\ML\NeuralNet\Layers\Dense\Dense; use Rubix\ML\NeuralNet\Initializers\He; use Rubix\ML\NeuralNet\Initializers\Constant; $layer = new Dense(100, 1e-4, true, new He(), new Constant(0.0)); -``` \ No newline at end of file +``` diff --git a/src/NeuralNet/Layers/Dense/Dense.php b/src/NeuralNet/Layers/Dense/Dense.php new file mode 100644 index 000000000..ee62d1e75 --- /dev/null +++ b/src/NeuralNet/Layers/Dense/Dense.php @@ -0,0 +1,348 @@ + + */ +class Dense implements Hidden, Parametric +{ + /** + * The number of nodes in the layer. + * + * @var positive-int + */ + protected int $neurons; + + /** + * The amount of L2 regularization applied to the weights. + * + * @var float + */ + protected float $l2Penalty; + + /** + * Should the layer include a bias parameter? + * + * @var bool + */ + protected bool $bias; + + /** + * The weight initializer. + * + * @var Initializer + */ + protected Initializer $weightInitializer; + + /** + * The bias initializer. + * + * @var Initializer + */ + protected Initializer $biasInitializer; + + /** + * The weights. + * + * @var Parameter|null + */ + protected ?Parameter $weights = null; + + /** + * The biases. + * + * @var Parameter|null + */ + protected ?Parameter $biases = null; + + /** + * The memorized inputs to the layer. 
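+ *
+ * Cached by forward() and consumed (then cleared) by back() when computing
+ * the weight gradient.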
+ * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * @param int $neurons + * @param float $l2Penalty + * @param bool $bias + * @param Initializer|null $weightInitializer + * @param Initializer|null $biasInitializer + * @throws InvalidArgumentException + */ + public function __construct( + int $neurons, + float $l2Penalty = 0.0, + bool $bias = true, + ?Initializer $weightInitializer = null, + ?Initializer $biasInitializer = null + ) { + if ($neurons < 1) { + throw new InvalidArgumentException("Number of neurons must be greater than 0, $neurons given."); + } + + if ($l2Penalty < 0.0) { + throw new InvalidArgumentException("L2 Penalty must be greater than 0, $l2Penalty given."); + } + + $this->neurons = $neurons; + $this->l2Penalty = $l2Penalty; + $this->bias = $bias; + $this->weightInitializer = $weightInitializer ?? new HeUniform(); + $this->biasInitializer = $biasInitializer ?? new Constant(0.0); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @return positive-int + */ + public function width() : int + { + return $this->neurons; + } + + /** + * Return the weight matrix. + * + * @internal + * + * @throws RuntimeException + * @return NDArray + */ + public function weights() : NDArray + { + if (!$this->weights) { + throw new RuntimeException('Layer is not initialized'); + } + + return $this->weights->param(); + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $this->neurons; + + $weights = $this->weightInitializer->initialize($fanIn, $fanOut); + + $this->weights = new Parameter($weights); + + if ($this->bias) { + // Initialize biases as a vector of length fanOut + $biasMat = $this->biasInitializer->initialize(1, $fanOut); + $biases = NumPower::flatten($biasMat); + + $this->biases = new Parameter($biases); + } + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + * @internal + * + */ + public function forward(NDArray $input) : NDArray + { + if (!$this->weights) { + throw new RuntimeException('Layer is not initialized'); + } + + $output = NumPower::matmul($this->weights->param(), $input); + + if ($this->biases) { + // Reshape bias vector [fanOut] to column [fanOut, 1] to match output [fanOut, n] + $bias = NumPower::reshape($this->biases->param(), [$this->neurons, 1]); + // Manual “broadcast”: [neurons, n] + [neurons, 1] + $output = NumPower::add($output, $bias); + } + + $this->input = $input; + + return $output; + } + + /** + * Compute an inference pass through the layer. + * + * @param NDArray $input + * @return NDArray + * @internal + * + */ + public function infer(NDArray $input) : NDArray + { + if (!$this->weights) { + throw new RuntimeException('Layer is not initialized'); + } + + $output = NumPower::matmul($this->weights->param(), $input); + + if ($this->biases) { + // Reshape bias vector [fanOut] to column [fanOut, 1] to match output [fanOut, n] + $bias = NumPower::reshape($this->biases->param(), [$this->neurons, 1]); + // Manual “broadcast”: [neurons, n] + [neurons, 1] + $output = NumPower::add($output, $bias); + } + + return $output; + } + + /** + * Calculate the gradient and update the parameters of the layer. 
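+ *
+ * Computes the weight gradient dW = dOut · inputᵀ (plus the L2 penalty term
+ * when enabled) and the bias gradient as the sum of dOut over the batch
+ * dimension, applies both updates through the optimizer, and defers the
+ * input gradient Wᵀ · dOut for the previous layer.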
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->weights) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->input) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + $inputT = NumPower::transpose($this->input, [1, 0]); + + $dW = NumPower::matmul($dOut, $inputT); + + $weights = $this->weights->param(); + + if ($this->l2Penalty) { + $dW = NumPower::add( + $dW, + NumPower::multiply($weights, $this->l2Penalty) + ); + } + + $this->weights->update($dW, $optimizer); + + if ($this->biases) { + // Sum gradients over the batch dimension to obtain a bias gradient + // with the same shape as the bias vector [neurons] + $dB = NumPower::sum($dOut, axis: 1); + + $this->biases->update($dB, $optimizer); + } + + $this->input = null; + + return new Deferred([$this, 'gradient'], [$weights, $dOut]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $weights + * @param NDArray $dOut + * @return NDArray + */ + public function gradient(NDArray $weights, NDArray $dOut) : NDArray + { + $weightsT = NumPower::transpose($weights, [1, 0]); + + return NumPower::matmul($weightsT, $dOut); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->weights) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'weights' => $this->weights; + + if ($this->biases) { + yield 'biases' => $this->biases; + } + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->weights = $parameters['weights']; + $this->biases = $parameters['biases'] ?? null; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Dense (neurons: {$this->neurons}, l2 penalty: {$this->l2Penalty}," + . ' bias: ' . Params::toString($this->bias) . ',' + . " weight initializer: {$this->weightInitializer}," + . 
" bias initializer: {$this->biasInitializer})"; + } +} diff --git a/tests/NeuralNet/Layers/Dense/DenseTest.php b/tests/NeuralNet/Layers/Dense/DenseTest.php new file mode 100644 index 000000000..d8c920aa3 --- /dev/null +++ b/tests/NeuralNet/Layers/Dense/DenseTest.php @@ -0,0 +1,308 @@ +>, array, array>}> + */ + public static function forwardProvider() : array + { + return [ + [ + // weights 2x3 + [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ], + // biases length-2 + [0.0, 0.0], + // expected forward output 2x3 for the fixed input in setUp() + // input = [ + // [1.0, 2.5, -0.1], + // [0.1, 0.0, 3.0], + // [0.002, -6.0, -0.5], + // ]; + // so W * input = first two rows of input + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + ], + ], + ]; + } + + /** + * @return array>, array, array>, array>}> + */ + public static function backProvider() : array + { + return [ + [ + // weights 2x3 + [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ], + // biases length-2 + [0.0, 0.0], + // prev gradient 2x3 + [ + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ], + // expected gradient for previous layer 3x3 + [ + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + [0.0, 0.0, 0.0], + ], + ], + ]; + } + + protected function setUp() : void + { + $this->fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Dense( + neurons: 2, + l2Penalty: 0.0, + bias: true, + weightInitializer: new HeUniform(), + biasInitializer: new Constant(0.0) + ); + + srand(self::RANDOM_SEED); + } + + #[Test] + #[TestDox('Throws an exception for invalid constructor arguments')] + public function testConstructorValidation() : void + { + $this->expectException(InvalidArgumentException::class); + + new Dense( + neurons: 0, + l2Penalty: -0.1, + bias: true, + weightInitializer: new HeUniform(), + biasInitializer: new Constant(0.0) + ); + } + + #[Test] + #[TestDox('Computes forward activations for fixed weights and biases')] + #[DataProvider('forwardProvider')] + public function testForward(array $weights, array $biases, array $expected) : void + { + $this->layer->initialize($this->fanIn); + self::assertEquals(2, $this->layer->width()); + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method weights() returns the restored weight matrix')] + public function testWeightsReturnsExpectedValues() : void + { + $this->layer->initialize($this->fanIn); + + $weightsArray = [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ]; + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weightsArray)), + 'biases' => new TrainableParameter(NumPower::array([0.0, 0.0])), + ]); + + $weights = $this->layer->weights(); + + self::assertEqualsWithDelta($weightsArray, $weights->toArray(), 1e-7); + } + + #[Test] + #[TestDox('width() returns the number of neurons')] + public function testWidthReturnsNeuronsCount() : void + { + // Layer is constructed in setUp() with neurons: 2 + self::assertSame(2, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes backpropagated gradients for previous layer')] + #[DataProvider('backProvider')] + public 
function testBack(array $weights, array $biases, array $prevGrad, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $prevGradNd = NumPower::array($prevGrad); + + // Forward pass to set internal input cache + $this->layer->forward($this->input); + + $gradient = $this->layer->back( + prevGradient: new Deferred(fn: fn () => $prevGradNd), + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations equal to forward for fixed parameters')] + #[DataProvider('forwardProvider')] + public function testInfer(array $weights, array $biases, array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method restore() correctly replaces layer parameters')] + public function testRestoreReplacesParameters() : void + { + $this->layer->initialize($this->fanIn); + + // Use the same deterministic weights and biases as in forwardProvider + $weights = [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ]; + + $biases = [0.0, 0.0]; + + $expected = [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + ]; + + $this->layer->restore([ + 'weights' => new TrainableParameter(NumPower::array($weights)), + 'biases' => new TrainableParameter(NumPower::array($biases)), + ]); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method parameters() yields restored weights and biases')] + public function testParametersReturnsRestoredParameters() : void + { + $this->layer->initialize($this->fanIn); + + $weightsArray = [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + ]; + + $biasesArray = [0.0, 0.0]; + + $weightsParam = new TrainableParameter(NumPower::array($weightsArray)); + $biasesParam = new TrainableParameter(NumPower::array($biasesArray)); + + $this->layer->restore([ + 'weights' => $weightsParam, + 'biases' => $biasesParam, + ]); + + $params = iterator_to_array($this->layer->parameters()); + + self::assertArrayHasKey('weights', $params); + self::assertArrayHasKey('biases', $params); + + self::assertSame($weightsParam, $params['weights']); + self::assertSame($biasesParam, $params['biases']); + + self::assertEqualsWithDelta($weightsArray, $params['weights']->param()->toArray(), 1e-7); + self::assertEqualsWithDelta($biasesArray, $params['biases']->param()->toArray(), 1e-7); + } + + #[Test] + #[TestDox('It returns correct string representation')] + public function testToStringReturnsCorrectValue() : void + { + $expected = 'Dense (neurons: 2, l2 penalty: 0, bias: true, weight initializer: He Uniform, bias initializer: Constant (value: 0))'; + + self::assertSame($expected, (string) $this->layer); + } +} From 9767a1fdc32a007f7569535b6ff1f4319f644d04 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Wed, 10 Dec 2025 23:53:18 +0200 Subject: [PATCH 13/23] ML-401 Added `Dropout` hidden layer implementation with forward/inference/backward passes, unit tests, and documentation updates --- 
docs/neural-network/hidden-layers/dropout.md | 6 +- src/NeuralNet/Layers/Dropout/Dropout.php | 208 ++++++++++++++++++ .../NeuralNet/Layers/Dropout/DropoutTest.php | 143 ++++++++++++ 3 files changed, 354 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Dropout/Dropout.php create mode 100644 tests/NeuralNet/Layers/Dropout/DropoutTest.php diff --git a/docs/neural-network/hidden-layers/dropout.md b/docs/neural-network/hidden-layers/dropout.md index 566f83bad..28414f8ca 100644 --- a/docs/neural-network/hidden-layers/dropout.md +++ b/docs/neural-network/hidden-layers/dropout.md @@ -1,4 +1,4 @@ -[source] +[source] # Dropout Dropout is a regularization technique to reduce overfitting in neural networks by preventing complex co-adaptations on training data. It works by temporarily disabling output nodes during each training pass. It also acts as an efficient way of performing model averaging with the parameters of neural networks. @@ -10,10 +10,10 @@ Dropout is a regularization technique to reduce overfitting in neural networks b ## Example ```php -use Rubix\ML\NeuralNet\Layers\Dropout; +use Rubix\ML\NeuralNet\Layers\Dropout\Dropout; $layer = new Dropout(0.2); ``` ## References -[^1]: N. Srivastava et al. (2014). Dropout: A Simple Way to Prevent Neural Networks from Overfitting. \ No newline at end of file +[^1]: N. Srivastava et al. (2014). Dropout: A Simple Way to Prevent Neural Networks from Overfitting. diff --git a/src/NeuralNet/Layers/Dropout/Dropout.php b/src/NeuralNet/Layers/Dropout/Dropout.php new file mode 100644 index 000000000..54abaf861 --- /dev/null +++ b/src/NeuralNet/Layers/Dropout/Dropout.php @@ -0,0 +1,208 @@ += 1.0) { + throw new InvalidArgumentException("Ratio must be between 0 and 1, $ratio given."); + } + + $this->ratio = $ratio; + $this->scale = 1.0 / (1.0 - $ratio); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray|null $mask Custom dropout mask to use instead of generating one. + * @return NDArray + */ + public function forward(NDArray $input, ?NDArray $mask = null) : NDArray + { + if ($mask === null) { + // Build dropout mask using PHP's RNG. Each unit is kept with + // probability (1 - ratio) and scaled by $this->scale. + $inputArray = $input->toArray(); + + $maskArray = []; + + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $_value) { + $u = rand() / getrandmax(); + + $maskArray[$i][$j] = $u > $this->ratio ? $this->scale : 0.0; + } + } + + $mask = NumPower::array($maskArray); + } + + $output = NumPower::multiply($input, $mask); + + $this->mask = $mask; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $input; + } + + /** + * Calculate the gradients of the layer and update the parameters. 
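+ *
+ * Dropout has no trainable parameters, so the optimizer is not used here;
+ * the deferred computation multiplies the incoming gradient by the same
+ * mask that was applied during the forward pass.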
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->mask) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + $mask = $this->mask; + + $this->mask = null; + + return new Deferred([$this, 'gradient'], [$prevGradient, $mask]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param Deferred $prevGradient + * @param NDArray $mask + * @return NDArray + */ + public function gradient(Deferred $prevGradient, NDArray $mask) : NDArray + { + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + return NumPower::multiply($dOut, $mask); + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Dropout (ratio: {$this->ratio})"; + } +} diff --git a/tests/NeuralNet/Layers/Dropout/DropoutTest.php b/tests/NeuralNet/Layers/Dropout/DropoutTest.php new file mode 100644 index 000000000..337466986 --- /dev/null +++ b/tests/NeuralNet/Layers/Dropout/DropoutTest.php @@ -0,0 +1,143 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Dropout(0.5); + } + + #[Test] + #[TestDox('Initializes width equal to fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('forward() returns an NDArray with the same shape as the input')] + public function testForward() : void + { + $this->layer->initialize($this->fanIn); + + // Deterministic mask so that forward output is predictable + $mask = NumPower::array([ + [2.0, 2.0, 2.0], + [2.0, 0.0, 2.0], + [2.0, 2.0, 0.0], + ]); + + $forward = $this->layer->forward($this->input, $mask); + + $expected = [ + [2.0, 5.0, -0.2], + [0.2, 0.0, 6.0], + [0.004, -12.0, 0.0], + ]; + + self::assertSame($this->input->shape(), $forward->shape()); + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates gradients using the same dropout mask')] + public function testBack() : void + { + $this->layer->initialize($this->fanIn); + + // Use the same deterministic mask as in testForward so that the + // gradient is fully predictable: grad = prevGrad * mask. 
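+ // Note: Dropout ignores the optimizer argument since it has no parameters.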
+ $mask = NumPower::array([ + [2.0, 2.0, 2.0], + [2.0, 0.0, 2.0], + [2.0, 2.0, 0.0], + ]); + + // Forward pass to set internal mask cache + $this->layer->forward($this->input, $mask); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $expected = [ + [0.5, 1.4, 0.2], + [1.0, 0.0, 0.02], + [0.5, 0.2, 0.0], + ]; + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Inference pass leaves inputs unchanged')] + public function testInfer() : void + { + $this->layer->initialize($this->fanIn); + + $expected = [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]; + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From 8d7c938b2578e1cc747b9eed5c513c252edb7a4b Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Thu, 11 Dec 2025 16:37:32 +0200 Subject: [PATCH 14/23] ML-392 Refactored `Dropout` layer to replace custom mask generation with `NumPower` utilities --- src/NeuralNet/Layers/Dropout/Dropout.php | 26 +-- .../NeuralNet/Layers/Dropout/DropoutTest.php | 210 ++++++++++++++---- 2 files changed, 181 insertions(+), 55 deletions(-) diff --git a/src/NeuralNet/Layers/Dropout/Dropout.php b/src/NeuralNet/Layers/Dropout/Dropout.php index 54abaf861..ce2409f7d 100644 --- a/src/NeuralNet/Layers/Dropout/Dropout.php +++ b/src/NeuralNet/Layers/Dropout/Dropout.php @@ -111,28 +111,20 @@ public function initialize(int $fanIn) : int * @internal * * @param NDArray $input - * @param NDArray|null $mask Custom dropout mask to use instead of generating one. * @return NDArray */ - public function forward(NDArray $input, ?NDArray $mask = null) : NDArray + public function forward(NDArray $input) : NDArray { - if ($mask === null) { - // Build dropout mask using PHP's RNG. Each unit is kept with - // probability (1 - ratio) and scaled by $this->scale. - $inputArray = $input->toArray(); + // Build dropout mask using NumPower's uniform RNG. Each unit is kept + // with probability (1 - ratio) and scaled by $this->scale. + $shape = $input->shape(); - $maskArray = []; + // Uniform random numbers in [0, 1) with same shape as input + $rand = NumPower::uniform($shape, 0.0, 1.0); - foreach ($inputArray as $i => $row) { - foreach ($row as $j => $_value) { - $u = rand() / getrandmax(); - - $maskArray[$i][$j] = $u > $this->ratio ? 
$this->scale : 0.0; - } - } - - $mask = NumPower::array($maskArray); - } + // mask = (rand > ratio) * scale + $mask = NumPower::greater($rand, $this->ratio); + $mask = NumPower::multiply($mask, $this->scale); $output = NumPower::multiply($input, $mask); diff --git a/tests/NeuralNet/Layers/Dropout/DropoutTest.php b/tests/NeuralNet/Layers/Dropout/DropoutTest.php index 337466986..47cf1ece1 100644 --- a/tests/NeuralNet/Layers/Dropout/DropoutTest.php +++ b/tests/NeuralNet/Layers/Dropout/DropoutTest.php @@ -7,10 +7,12 @@ use NDArray; use NumPower; use PHPUnit\Framework\Attributes\CoversClass; +use PHPUnit\Framework\Attributes\DataProvider; use PHPUnit\Framework\Attributes\Group; use PHPUnit\Framework\Attributes\Test; use PHPUnit\Framework\Attributes\TestDox; use Rubix\ML\Deferred; +use Rubix\ML\Exceptions\InvalidArgumentException; use Rubix\ML\NeuralNet\Layers\Dropout\Dropout; use Rubix\ML\NeuralNet\Optimizers\Base\Optimizer; use Rubix\ML\NeuralNet\Optimizers\Stochastic\Stochastic; @@ -20,8 +22,6 @@ #[CoversClass(Dropout::class)] class DropoutTest extends TestCase { - protected const int RANDOM_SEED = 0; - /** * @var positive-int */ @@ -58,6 +58,43 @@ protected function setUp() : void $this->layer = new Dropout(0.5); } + /** + * @return array + */ + public static function badRatioProvider() : array + { + return [ + 'zero' => [0.0], + 'negative' => [-0.1], + 'one' => [1.0], + 'greaterThanOne'=> [1.1], + ]; + } + + /** + * @return array>}> + */ + public static function inferProvider() : array + { + return [ + 'identityOnInput' => [[ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]], + ]; + } + + #[Test] + #[TestDox('Constructor rejects invalid ratio values')] + #[DataProvider('badRatioProvider')] + public function testConstructorRejectsInvalidRatio(float $ratio) : void + { + $this->expectException(InvalidArgumentException::class); + + new Dropout($ratio); + } + #[Test] #[TestDox('Initializes width equal to fan-in')] public function testInitializeSetsWidth() : void @@ -68,28 +105,51 @@ public function testInitializeSetsWidth() : void } #[Test] - #[TestDox('forward() returns an NDArray with the same shape as the input')] + #[TestDox('Method forward() applies dropout mask with correct shape and scaling')] public function testForward() : void { $this->layer->initialize($this->fanIn); - // Deterministic mask so that forward output is predictable - $mask = NumPower::array([ - [2.0, 2.0, 2.0], - [2.0, 0.0, 2.0], - [2.0, 2.0, 0.0], - ]); + $forward = $this->layer->forward($this->input); - $forward = $this->layer->forward($this->input, $mask); + $inputArray = $this->input->toArray(); + $forwardArray = $forward->toArray(); - $expected = [ - [2.0, 5.0, -0.2], - [0.2, 0.0, 6.0], - [0.004, -12.0, 0.0], - ]; + self::assertSameSize($inputArray, $forwardArray); + + $scale = 1.0 / (1.0 - 0.5); // ratio = 0.5 + + $nonZero = 0; + $total = 0; + + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $x) { + $y = $forwardArray[$i][$j]; + $total++; - self::assertSame($this->input->shape(), $forward->shape()); - self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + if (abs($x) < 1e-12) { + // If input is (near) zero, output should also be ~0 + self::assertEqualsWithDelta(0.0, $y, 1e-7); + continue; + } + + if (abs($y) < 1e-12) { + // Dropped unit + continue; + } + + $nonZero++; + + // Kept unit should be scaled input + self::assertEqualsWithDelta($x * $scale, $y, 1e-6); + } + } + + // Roughly (1 - ratio) of units should be non-zero; allow wide tolerance + $expectedKept = (1.0 - 
0.5) * $total; + self::assertGreaterThan(0, $nonZero); + self::assertLessThan($total, $nonZero); + self::assertEqualsWithDelta($expectedKept, $nonZero, $total * 0.5); } #[Test] @@ -98,21 +158,104 @@ public function testBack() : void { $this->layer->initialize($this->fanIn); - // Use the same deterministic mask as in testForward so that the - // gradient is fully predictable: grad = prevGrad * mask. + // Forward pass to generate and store mask + $forward = $this->layer->forward($this->input); + $forwardArray = $forward->toArray(); + $inputArray = $this->input->toArray(); + + // Approximate mask from forward output: mask ≈ forward / input + $maskArray = []; + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $x) { + $y = $forwardArray[$i][$j]; + + if (abs($x) < 1e-12) { + $maskArray[$i][$j] = 0.0; + } else { + $maskArray[$i][$j] = $y / $x; + } + } + } + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $gradArray = $gradient->toArray(); + $prevGradArray = ($this->prevGrad)()->toArray(); + + // Expected gradient per element: prevGrad * mask for non-zero inputs. + // For zero inputs, the mask cannot be inferred from the forward output + // (forward is always 0 regardless of mask), so we accept the actual + // gradient value there. + $expectedGrad = []; + foreach ($prevGradArray as $i => $row) { + foreach ($row as $j => $g) { + if (abs($inputArray[$i][$j]) < 1e-12) { + $expectedGrad[$i][$j] = $gradArray[$i][$j]; + } else { + $expectedGrad[$i][$j] = $g * $maskArray[$i][$j]; + } + } + } + + self::assertEqualsWithDelta($expectedGrad, $gradArray, 1e-6); + } + + #[Test] + #[TestDox('Inference pass leaves inputs unchanged')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Method initialize() returns fan out equal to fan in')] + public function testInitializeReturnsFanOut() : void + { + $fanOut = $this->layer->initialize($this->fanIn); + + self::assertSame($this->fanIn, $fanOut); + } + + #[Test] + #[TestDox('Method width() returns the initialized width')] + public function testWidthAfterInitialize() : void + { + $this->layer->initialize($this->fanIn); + + self::assertSame($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Method gradient() multiplies previous gradient by the dropout mask')] + public function testGradient() : void + { + // Deterministic previous gradient (same shape as input) + $prevGradNd = NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + + // Same deterministic mask as used in testForward/testBack $mask = NumPower::array([ [2.0, 2.0, 2.0], [2.0, 0.0, 2.0], [2.0, 2.0, 0.0], ]); - // Forward pass to set internal mask cache - $this->layer->forward($this->input, $mask); + $prevGradient = new Deferred(fn: static function () use ($prevGradNd) : NDArray { + return $prevGradNd; + }); - $gradient = $this->layer->back( - prevGradient: $this->prevGrad, - optimizer: $this->optimizer - )->compute(); + $gradient = $this->layer->gradient($prevGradient, $mask); $expected = [ [0.5, 1.4, 0.2], @@ -120,24 +263,15 @@ public function testBack() : void [0.5, 0.2, 0.0], ]; - self::assertInstanceOf(NDArray::class, $gradient); self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); } #[Test] - #[TestDox('Inference pass leaves inputs 
unchanged')] - public function testInfer() : void + #[TestDox('It returns correct string representation')] + public function testToString() : void { - $this->layer->initialize($this->fanIn); - - $expected = [ - [1.0, 2.5, -0.1], - [0.1, 0.0, 3.0], - [0.002, -6.0, -0.5], - ]; + $expected = 'Dropout (ratio: 0.5)'; - $infer = $this->layer->infer($this->input); - - self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + self::assertSame($expected, (string) $this->layer); } } From 5ad0ed2da331f89ceb15cc92dd21ea3509653ef5 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Thu, 11 Dec 2025 19:41:29 +0200 Subject: [PATCH 15/23] ML-392 Added `Multiclass` output layer with complete interface, forward/inference/backward passes, unit tests --- src/NeuralNet/Layers/Dropout/Dropout.php | 1 + .../Layers/Multiclass/Multiclass.php | 229 ++++++++++++++++++ .../Layers/Multiclass/MulticlassTest.php | 217 +++++++++++++++++ 3 files changed, 447 insertions(+) create mode 100644 src/NeuralNet/Layers/Multiclass/Multiclass.php create mode 100644 tests/NeuralNet/Layers/Multiclass/MulticlassTest.php diff --git a/src/NeuralNet/Layers/Dropout/Dropout.php b/src/NeuralNet/Layers/Dropout/Dropout.php index ce2409f7d..45d88e57a 100644 --- a/src/NeuralNet/Layers/Dropout/Dropout.php +++ b/src/NeuralNet/Layers/Dropout/Dropout.php @@ -25,6 +25,7 @@ * @category Machine Learning * @package Rubix/ML * @author Andrew DalPino + * @author Samuel Akopyan */ class Dropout implements Hidden { diff --git a/src/NeuralNet/Layers/Multiclass/Multiclass.php b/src/NeuralNet/Layers/Multiclass/Multiclass.php new file mode 100644 index 000000000..b6e33a5ac --- /dev/null +++ b/src/NeuralNet/Layers/Multiclass/Multiclass.php @@ -0,0 +1,229 @@ + + */ +class Multiclass implements Output +{ + /** + * The unique class labels. + * + * @var string[] + */ + protected array $classes = [ + // + ]; + + /** + * The function that computes the loss of erroneous activations. + * + * @var ClassificationLoss + */ + protected ClassificationLoss $costFn; + + /** + * The softmax activation function. + * + * @var Softmax + */ + protected Softmax $softmax; + + /** + * The memorized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. + * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param string[] $classes + * @param ClassificationLoss|null $costFn + * @throws InvalidArgumentException + */ + public function __construct(array $classes, ?ClassificationLoss $costFn = null) + { + $classes = array_values(array_unique($classes)); + + if (count($classes) < 2) { + throw new InvalidArgumentException('Number of classes' + . ' must be greater than 1, ' . count($classes) + . ' given.'); + } + + $this->classes = $classes; + $this->costFn = $costFn ?? new CrossEntropy(); + $this->softmax = new Softmax(); + } + + /** + * Return the width of the layer. + * + * @return positive-int + */ + public function width() : int + { + return max(1, count($this->classes)); + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @throws InvalidArgumentException + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = count($this->classes); + + if ($fanIn !== $fanOut) { + throw new InvalidArgumentException('Fan in must be' + . " equal to fan out, $fanOut expected but" + . 
" $fanIn given."); + } + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $output = $this->softmax->activate($input); + + $this->input = $input; + $this->output = $output; + + return $output; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->softmax->activate($input); + } + + /** + * Compute the gradient and loss at the output. + * + * @param string[] $labels + * @param Optimizer $optimizer + * @throws RuntimeException + * @return array + */ + public function back(array $labels, Optimizer $optimizer) : array + { + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass' + . ' before backpropagating.'); + } + + $expected = []; + + foreach ($labels as $label) { + $dist = []; + + foreach ($this->classes as $class) { + $dist[] = $class == $label ? 1.0 : 0.0; + } + + $expected[] = $dist; + } + + $expected = NumPower::array($expected); + + $input = $this->input; + $output = $this->output; + + $gradient = new Deferred([$this, 'gradient'], [$input, $output, $expected]); + + $loss = $this->costFn->compute($output, $expected); + + $this->input = $this->output = null; + + return [$gradient, $loss]; + } + + /** + * Calculate the gradient for the previous layer. + * + * @param NDArray $input + * @param NDArray $output + * @param NDArray $expected + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, NDArray $expected) : NDArray + { + $n = array_product($output->shape()); + + if ($this->costFn instanceof CrossEntropy) { + return NumPower::divide( + NumPower::subtract($output, $expected), + $n + ); + } + + $dLoss = NumPower::divide( + $this->costFn->differentiate($output, $expected), + $n + ); + + return NumPower::multiply( + $this->softmax->differentiate($output), + $dLoss + ); + } + + /** + * Return the string representation of the object. 
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Multiclass (cost function: {$this->costFn})"; + } +} diff --git a/tests/NeuralNet/Layers/Multiclass/MulticlassTest.php b/tests/NeuralNet/Layers/Multiclass/MulticlassTest.php new file mode 100644 index 000000000..a920a4272 --- /dev/null +++ b/tests/NeuralNet/Layers/Multiclass/MulticlassTest.php @@ -0,0 +1,217 @@ + + */ + public static function initializeProvider() : array + { + return [ + 'fanInEqualsClasses' => [3], + ]; + } + + /** + * @return array>}> + */ + public static function forwardProvider() : array + { + return [ + 'expectedForward' => [[ + [0.1719820, 0.7707700, 0.0572478], + [0.0498033, 0.0450639, 0.9051327], + [0.6219707, 0.0015385, 0.3764905], + ]], + ]; + } + + /** + * @return array>}> + */ + public static function backProvider() : array + { + return [ + 'expectedGradient' => [[ + [-0.0920019, 0.0856411, 0.0063608], + [0.0055337, -0.1061040, 0.1005703], + [0.0691078, 0.00017093, -0.0692788], + ]], + ]; + } + + /** + * @return array>}> + */ + public static function inferProvider() : array + { + // Same expectations as forward + return self::forwardProvider(); + } + + protected function setUp() : void + { + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->labels = ['hot', 'cold', 'ice cold']; + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Multiclass( + classes: ['hot', 'cold', 'ice cold'], + costFn: new CrossEntropy() + ); + } + + #[Test] + #[TestDox('Constructor rejects invalid number of classes')] + public function testConstructorRejectsInvalidClasses() : void + { + $this->expectException(InvalidArgumentException::class); + + new Multiclass(classes: ['only-one-class']); + } + + #[Test] + #[TestDox('Method width() returns number of classes')] + public function testWidthReturnsNumberOfClasses() : void + { + self::assertSame(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Initializes and returns correct fan out')] + #[DataProvider('initializeProvider')] + public function testInitializeReturnsFanOut(int $fanIn) : void + { + $fanOut = $this->layer->initialize($fanIn); + + self::assertSame($fanIn, $fanOut); + self::assertSame(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward softmax probabilities')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize(3); + + self::assertEquals(3, $this->layer->width()); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns output gradient')] + #[DataProvider('backProvider')] + public function testBack(array $expected) : void + { + $this->layer->initialize(3); + + // Set internal caches + $this->layer->forward($this->input); + + [$computation, $loss] = $this->layer->back( + labels: $this->labels, + optimizer: $this->optimizer + ); + + self::assertInstanceOf(Deferred::class, $computation); + self::assertIsFloat($loss); + + $gradient = $computation->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient for previous layer directly')] + #[DataProvider('backProvider')] + public function testGradient(array $expectedGradient) : void + { + $this->layer->initialize(3); + + // Forward pass to obtain output probabilities + 
$output = $this->layer->forward($this->input); + + // Rebuild expected one-hot matrix the same way as Multiclass::back() + $expected = []; + + foreach ($this->labels as $label) { + $dist = []; + + foreach (['hot', 'cold', 'ice cold'] as $class) { + $dist[] = $class === $label ? 1.0 : 0.0; + } + + $expected[] = $dist; + } + + $expectedNd = NumPower::array($expected); + + $gradient = $this->layer->gradient($this->input, $output, $expectedNd); + + self::assertEqualsWithDelta($expectedGradient, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes infer softmax probabilities')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize(3); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('It returns correct string representation')] + public function testToStringReturnsCorrectValue() : void + { + $expected = 'Multiclass (cost function: Cross Entropy)'; + + self::assertSame($expected, (string) $this->layer); + } +} From a6c634f1493763c529d3592a876251030e03fffe Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 16:51:29 +0200 Subject: [PATCH 16/23] ML-392 Added `Noise` output layer with complete interface, forward/inference/backward passes, unit tests --- docs/neural-network/hidden-layers/noise.md | 6 +- src/NeuralNet/Layers/Noise/Noise.php | 157 ++++++++++++++++ tests/NeuralNet/Layers/Noise/NoiseTest.php | 208 +++++++++++++++++++++ 3 files changed, 368 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Noise/Noise.php create mode 100644 tests/NeuralNet/Layers/Noise/NoiseTest.php diff --git a/docs/neural-network/hidden-layers/noise.md b/docs/neural-network/hidden-layers/noise.md index 7979549af..4d29732cb 100644 --- a/docs/neural-network/hidden-layers/noise.md +++ b/docs/neural-network/hidden-layers/noise.md @@ -1,4 +1,4 @@ -[source] +[source] # Noise This layer adds random Gaussian noise to the inputs with a user-defined standard deviation. Noise added to neural network activations acts as a regularizer by indirectly adding a penalty to the weights through the cost function in the output layer. @@ -10,10 +10,10 @@ This layer adds random Gaussian noise to the inputs with a user-defined standard ## Example ```php -use Rubix\ML\NeuralNet\Layers\Noise; +use Rubix\ML\NeuralNet\Layers\Noise\Noise; $layer = new Noise(1e-3); ``` ## References -[^1]: C. Gulcehre et al. (2016). Noisy Activation Functions. \ No newline at end of file +[^1]: C. Gulcehre et al. (2016). Noisy Activation Functions. diff --git a/src/NeuralNet/Layers/Noise/Noise.php b/src/NeuralNet/Layers/Noise/Noise.php new file mode 100644 index 000000000..934265bb3 --- /dev/null +++ b/src/NeuralNet/Layers/Noise/Noise.php @@ -0,0 +1,157 @@ + + */ +class Noise implements Hidden +{ + /** + * The amount (standard deviation) of the gaussian noise to add to the inputs. + * + * @var float + */ + protected float $stdDev; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * @param float $stdDev + * @throws InvalidArgumentException + */ + public function __construct(float $stdDev) + { + if ($stdDev < 0.0) { + throw new InvalidArgumentException("Standard deviation must be 0 or greater, $stdDev given."); + } + + $this->stdDev = $stdDev; + } + + /** + * Return the width of the layer. 
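+ *
+ * The width equals the fan in recorded during initialize().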
+ * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + $this->width = $fanOut; + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if ($this->stdDev === 0.0) { + return $input; + } + + $shape = $input->shape(); + + // Gaussian noise with mean 0 and standard deviation $this->stdDev + $noise = NumPower::normal(size: $shape, loc: 0.0, scale: $this->stdDev); + + return NumPower::add($input, $noise); + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $input; + } + + /** + * Calculate the gradients of the layer and update the parameters. + * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + return $prevGradient; + } + + /** + * Return the string representation of the object. + * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Noise (std dev: {$this->stdDev})"; + } +} diff --git a/tests/NeuralNet/Layers/Noise/NoiseTest.php b/tests/NeuralNet/Layers/Noise/NoiseTest.php new file mode 100644 index 000000000..4eaf11770 --- /dev/null +++ b/tests/NeuralNet/Layers/Noise/NoiseTest.php @@ -0,0 +1,208 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Noise(0.1); + } + + /** + * @return array>}> + */ + public static function backProvider() : array + { + return [ + [ + [ + [0.25, 0.7, 0.1], + [0.5, 0.2, 0.01], + [0.25, 0.1, 0.89], + ], + ], + ]; + } + + /** + * @return array>}> + */ + public static function inferProvider() : array + { + return [ + [ + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ], + ], + ]; + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Noise (std dev: 0.1)', (string) $this->layer); + } + + #[Test] + #[TestDox('Constructor rejects invalid standard deviation')] + public function testConstructorRejectsInvalidStdDev() : void + { + $this->expectException(InvalidArgumentException::class); + + // Negative std dev should be rejected + new Noise(-0.1); + } + + #[Test] + #[TestDox('Forward throws if layer is not initialized')] + public function testForwardThrowsIfNotInitialized() : void + { + $layer = new Noise(0.1); + + $this->expectException(RuntimeException::class); + + $layer->forward($this->input); + } + + #[Test] + #[TestDox('Initializes width equal to 
fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward pass that adds Gaussian noise with correct shape and scale')] + public function testForwardAddsNoiseWithCorrectProperties() : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward($this->input); + + self::assertInstanceOf(NDArray::class, $forward); + + $inputArray = $this->input->toArray(); + $forwardArray = $forward->toArray(); + + // 1) Shape is preserved + self::assertSameSize($inputArray, $forwardArray); + + // 2) At least one element differs (very high probability) + $allEqual = true; + foreach ($inputArray as $i => $row) { + if ($row !== $forwardArray[$i]) { + $allEqual = false; + break; + } + } + self::assertFalse($allEqual, 'Expected forward output to differ from input due to noise.'); + + // 3) Empirical std dev of (forward - input) is ~ stdDev, within tolerance + $diffs = []; + foreach ($inputArray as $i => $row) { + foreach ($row as $j => $v) { + $diffs[] = $forwardArray[$i][$j] - $v; + } + } + + $n = count($diffs); + $mean = array_sum($diffs) / $n; + + $var = 0.0; + foreach ($diffs as $d) { + $var += ($d - $mean) * ($d - $mean); + } + $var /= $n; + $std = sqrt($var); + + // Mean of noise should be near 0, std near $this->stdDev + self::assertEqualsWithDelta(0.0, $mean, 2e-1); // +/-0.2 around 0 + self::assertEqualsWithDelta(0.1, $std, 1e-1); // +/-0.1 around 0.1 + } + + #[Test] + #[TestDox('Backpropagates and returns previous gradient unchanged')] + #[DataProvider('backProvider')] + public function testBackReturnsPrevGradient(array $expected) : void + { + $this->layer->initialize($this->fanIn); + $this->layer->forward($this->input); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Infer returns input unchanged')] + #[DataProvider('inferProvider')] + public function testInferIdentity(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From 7013a569df2ce38573027d626d39cbf0b11c095d Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 17:58:38 +0200 Subject: [PATCH 17/23] ML-392 Added `Placeholder1D` output layer with complete interface, forward/inference/backward passes, unit tests --- .../hidden-layers/placeholder1d.md | 17 +++ src/NeuralNet/Layers/Base/Contracts/Input.php | 18 +++ .../Layers/Placeholder1D/Placeholder1D.php | 108 +++++++++++++++++ .../Placeholder1D/Placeholder1DTest.php | 114 ++++++++++++++++++ 4 files changed, 257 insertions(+) create mode 100644 docs/neural-network/hidden-layers/placeholder1d.md create mode 100644 src/NeuralNet/Layers/Base/Contracts/Input.php create mode 100644 src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php create mode 100644 tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php diff --git a/docs/neural-network/hidden-layers/placeholder1d.md b/docs/neural-network/hidden-layers/placeholder1d.md new file mode 100644 index 000000000..f70575eee --- /dev/null +++ b/docs/neural-network/hidden-layers/placeholder1d.md @@ -0,0 +1,17 @@ +[source] + +# Placeholder 1D + +The Placeholder 1D input layer represents the future input 
values of a mini batch (matrix) of single dimensional tensors (vectors) to the neural network. It performs shape validation on the input and then forwards it unchanged to the next layer. + +## Parameters +| # | Name | Default | Type | Description | +|---|---|---|---|---| +| 1 | inputs | | int | The number of input nodes (features). | + +## Example +```php +use Rubix\ML\NeuralNet\Layers\Placeholder1D\Placeholder1D; + +$layer = new Placeholder1D(10); +``` diff --git a/src/NeuralNet/Layers/Base/Contracts/Input.php b/src/NeuralNet/Layers/Base/Contracts/Input.php new file mode 100644 index 000000000..f0d755253 --- /dev/null +++ b/src/NeuralNet/Layers/Base/Contracts/Input.php @@ -0,0 +1,18 @@ + + */ +interface Input extends Layer +{ + // +} diff --git a/src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php b/src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php new file mode 100644 index 000000000..45f8fc49d --- /dev/null +++ b/src/NeuralNet/Layers/Placeholder1D/Placeholder1D.php @@ -0,0 +1,108 @@ + + */ +class Placeholder1D implements Input +{ + /** + * The number of input nodes. i.e. feature inputs. + * + * @var positive-int + */ + protected int $inputs; + + /** + * @param int $inputs + * @throws InvalidArgumentException + */ + public function __construct(int $inputs) + { + if ($inputs < 1) { + throw new InvalidArgumentException("Number of input nodes must be greater than 0, $inputs given."); + } + + $this->inputs = $inputs; + } + + /** + * @return positive-int + */ + public function width() : int + { + return $this->inputs; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + return $this->inputs; + } + + /** + * Compute a forward pass through the layer. + * + * @param NDArray $input + * @throws InvalidArgumentException + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $shape = $input->shape(); + + if (empty($shape) || $shape[0] !== $this->inputs) { + $features = $shape[0] ?? 0; + + throw new InvalidArgumentException( + 'The number of features and input nodes must be equal,' + . " {$this->inputs} expected but {$features} given."); + } + + return $input; + } + + /** + * Compute an inferential pass through the layer. + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->forward($input); + } + + /** + * Return the string representation of the object. 
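+     * Example: 'Placeholder 1D (inputs: 10)' for a layer constructed with 10 input nodes.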
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Placeholder 1D (inputs: {$this->inputs})"; + } +} diff --git a/tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php b/tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php new file mode 100644 index 000000000..7aa3168c8 --- /dev/null +++ b/tests/NeuralNet/Layers/Placeholder1D/Placeholder1DTest.php @@ -0,0 +1,114 @@ +input = NumPower::array([ + [1.0, 2.5], + [0.1, 0.0], + [0.002, -6.0], + ]); + + $this->layer = new Placeholder1D(3); + } + + /** + * @return array>}> + */ + public static function inputProvider() : array + { + return [ + [ + NumPower::array([ + [1.0, 2.5], + [0.1, 0.0], + [0.002, -6.0], + ]), + [ + [1.0, 2.5], + [0.1, 0.0], + [0.002, -6.0], + ], + ], + ]; + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('Placeholder 1D (inputs: 3)', (string) $this->layer); + } + + #[Test] + #[TestDox('Returns width equal to number of inputs')] + public function testWidth() : void + { + self::assertEquals(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Constructor rejects invalid number of inputs')] + public function testConstructorRejectsInvalidInputs() : void + { + $this->expectException(InvalidArgumentException::class); + + new Placeholder1D(0); + } + + #[Test] + #[TestDox('Initialize returns fan out equal to inputs without changing width')] + public function testInitialize() : void + { + $fanOut = $this->layer->initialize(5); + + self::assertEquals(3, $fanOut); + self::assertEquals(3, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward pass')] + #[DataProvider('inputProvider')] + public function testForward(NDArray $input, array $expected) : void + { + self::assertEquals(3, $this->layer->width()); + + $forward = $this->layer->forward($input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference pass')] + #[DataProvider('inputProvider')] + public function testInfer(NDArray $input, array $expected) : void + { + self::assertEquals(3, $this->layer->width()); + + $infer = $this->layer->infer($input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } +} From 08ad6b4e3b0faba8e697453cfe64c18457ed6053 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 18:12:45 +0200 Subject: [PATCH 18/23] ML-392 Fixed wrong exception for AssertsShapes and exception texts --- src/NeuralNet/Initializers/He/HeNormal.php | 2 +- src/NeuralNet/Initializers/LeCun/LeCunNormal.php | 2 +- src/NeuralNet/Initializers/Normal/Normal.php | 2 +- src/NeuralNet/Initializers/Normal/TruncatedNormal.php | 2 +- src/NeuralNet/Initializers/Xavier/XavierNormal.php | 2 +- src/Traits/AssertsShapes.php | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/NeuralNet/Initializers/He/HeNormal.php b/src/NeuralNet/Initializers/He/HeNormal.php index 3d68844e4..193c7ff16 100644 --- a/src/NeuralNet/Initializers/He/HeNormal.php +++ b/src/NeuralNet/Initializers/He/HeNormal.php @@ -35,7 +35,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray $stdDev = sqrt(2 / $fanOut); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $stdDev); } /** diff --git a/src/NeuralNet/Initializers/LeCun/LeCunNormal.php b/src/NeuralNet/Initializers/LeCun/LeCunNormal.php index 81d8add56..3fc5832bc 100644 --- 
a/src/NeuralNet/Initializers/LeCun/LeCunNormal.php +++ b/src/NeuralNet/Initializers/LeCun/LeCunNormal.php @@ -36,7 +36,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray $stdDev = sqrt(1 / $fanOut); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $stdDev); } /** diff --git a/src/NeuralNet/Initializers/Normal/Normal.php b/src/NeuralNet/Initializers/Normal/Normal.php index 08c77ff38..acb4ad050 100644 --- a/src/NeuralNet/Initializers/Normal/Normal.php +++ b/src/NeuralNet/Initializers/Normal/Normal.php @@ -43,7 +43,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray { $this->validateFanInFanOut(fanIn: $fanIn, fanOut: $fanOut); - return NumPower::normal(size: [$fanOut, $fanIn], scale: $this->stdDev); + return NumPower::normal(size: [$fanOut, $fanIn], loc: 0.0, scale: $this->stdDev); } /** diff --git a/src/NeuralNet/Initializers/Normal/TruncatedNormal.php b/src/NeuralNet/Initializers/Normal/TruncatedNormal.php index c0c90196d..af9ed43fe 100644 --- a/src/NeuralNet/Initializers/Normal/TruncatedNormal.php +++ b/src/NeuralNet/Initializers/Normal/TruncatedNormal.php @@ -44,7 +44,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray { $this->validateFanInFanOut(fanIn: $fanIn, fanOut: $fanOut); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $this->stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $this->stdDev); } /** diff --git a/src/NeuralNet/Initializers/Xavier/XavierNormal.php b/src/NeuralNet/Initializers/Xavier/XavierNormal.php index dfe5bc956..428c74e49 100644 --- a/src/NeuralNet/Initializers/Xavier/XavierNormal.php +++ b/src/NeuralNet/Initializers/Xavier/XavierNormal.php @@ -36,7 +36,7 @@ public function initialize(int $fanIn, int $fanOut) : NDArray $stdDev = sqrt(2 / ($fanOut + $fanIn)); - return NumPower::truncatedNormal(size: [$fanOut, $fanIn], scale: $stdDev); + return NumPower::truncatedNormal(size: [$fanOut, $fanIn], loc: 0.0, scale: $stdDev); } /** diff --git a/src/Traits/AssertsShapes.php b/src/Traits/AssertsShapes.php index 7fabc316f..88fe23c1e 100644 --- a/src/Traits/AssertsShapes.php +++ b/src/Traits/AssertsShapes.php @@ -4,7 +4,7 @@ namespace Rubix\ML\Traits; -use InvalidArgumentException; +use Rubix\ML\Exceptions\InvalidArgumentException; use NDArray; /** @@ -29,7 +29,7 @@ trait AssertsShapes protected function assertSameShape(NDArray $output, NDArray $target) : void { if ($output->shape() !== $target->shape()) { - throw new InvalidArgumentException('Output and target must have identical shapes.'); + throw new InvalidArgumentException('Output and target must have the same shape.'); } } } From ce99147475683eac03624289ff85627ac6c6695c Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Fri, 19 Dec 2025 18:23:08 +0200 Subject: [PATCH 19/23] ML-392 Increased memory for tests --- phpunit.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/phpunit.xml b/phpunit.xml index 22063bc22..379cdc0a2 100644 --- a/phpunit.xml +++ b/phpunit.xml @@ -83,5 +83,6 @@ + From 7628fecbe13408f2914163288fc8051a96e79ea8 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Mon, 22 Dec 2025 20:14:49 +0200 Subject: [PATCH 20/23] ML-392 Added `PReLU` output layer with complete interface and unit tests --- docs/neural-network/hidden-layers/prelu.md | 6 +- src/NeuralNet/Layers/PReLU/PReLU.php | 287 ++++++++++++++++++++ tests/NeuralNet/Layers/PReLU/PReLUTest.php | 291 +++++++++++++++++++++ 3 files changed, 581 
insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/PReLU/PReLU.php create mode 100644 tests/NeuralNet/Layers/PReLU/PReLUTest.php diff --git a/docs/neural-network/hidden-layers/prelu.md b/docs/neural-network/hidden-layers/prelu.md index baaef2f32..22a5b4762 100644 --- a/docs/neural-network/hidden-layers/prelu.md +++ b/docs/neural-network/hidden-layers/prelu.md @@ -1,4 +1,4 @@ -[source] +[source] # PReLU Parametric Rectified Linear Units are leaky rectifiers whose *leakage* coefficient is learned during training. Unlike standard [Leaky ReLUs](../activation-functions/leaky-relu.md) whose leakage remains constant, PReLU layers can adjust the leakage to better suite the model on a per node basis. @@ -14,8 +14,8 @@ $$ ## Example ```php -use Rubix\ML\NeuralNet\Layers\PReLU; -use Rubix\ML\NeuralNet\Initializers\Normal; +use Rubix\ML\NeuralNet\Layers\PReLU\PReLU; +use Rubix\ML\NeuralNet\Initializers\Normal\Normal; $layer = new PReLU(new Normal(0.5)); ``` diff --git a/src/NeuralNet/Layers/PReLU/PReLU.php b/src/NeuralNet/Layers/PReLU/PReLU.php new file mode 100644 index 000000000..a8986cce4 --- /dev/null +++ b/src/NeuralNet/Layers/PReLU/PReLU.php @@ -0,0 +1,287 @@ + + */ +class PReLU implements Hidden, Parametric +{ + /** + * The initializer of the alpha (leakage) parameter. + * + * @var Initializer + */ + protected Initializer $initializer; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * The parameterized leakage coefficients. + * + * @var Parameter|null + */ + protected ?Parameter $alpha = null; + + /** + * The memoized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * @param Initializer|null $initializer + */ + public function __construct(?Initializer $initializer = null) + { + $this->initializer = $initializer ?? new Constant(0.25); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + // Initialize alpha as a vector of length fanOut (one alpha per neuron) + // Using shape [fanOut, 1] then flattening to [fanOut] + $alphaMat = $this->initializer->initialize(1, $fanOut); + $alpha = NumPower::flatten($alphaMat); + + $this->width = $fanOut; + $this->alpha = new Parameter($alpha); + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $this->input = $input; + + return $this->activate($input); + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->activate($input); + } + + /** + * Calculate the gradient and update the parameters of the layer. 
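+     * As implemented below, the per-neuron alpha gradient is taken as
+     * dL/dalpha = sum over the batch axis of (dOut * min(input, 0)), while the
+     * gradient handed back to the previous layer is dOut scaled by 1 where the
+     * input was positive and by alpha where it was zero or negative.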
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->input) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + // Negative part of the input (values <= 0), used for dL/dalpha + $negativeInput = NumPower::minimum($this->input, 0.0); + + $dAlphaFull = NumPower::multiply($dOut, $negativeInput); + + // Sum over the batch axis (axis = 1) to obtain a gradient vector [width] + $dAlpha = NumPower::sum($dAlphaFull, axis: 1); + + $this->alpha->update($dAlpha, $optimizer); + + $input = $this->input; + + $this->input = null; + + return new Deferred([$this, 'gradient'], [$input, $dOut]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray $dOut + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $dOut) : NDArray + { + $derivative = $this->differentiate($input); + + return NumPower::multiply($derivative, $dOut); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws \RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'alpha' => $this->alpha; + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->alpha = $parameters['alpha']; + } + + /** + * Compute the leaky ReLU activation function and return a matrix. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + protected function activate(NDArray $input) : NDArray + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + // Reshape alpha vector [width] to column [width, 1] for broadcasting + $alphaCol = NumPower::reshape($this->alpha->param(), [$this->width(), 1]); + + $positiveActivation = NumPower::maximum($input, 0.0); + + $negativeActivation = NumPower::multiply( + NumPower::minimum($input, 0.0), + $alphaCol, + ); + + return NumPower::add($positiveActivation, $negativeActivation); + } + + /** + * Calculate the derivative of the activation function at a given output. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + protected function differentiate(NDArray $input) : NDArray + { + if (!$this->alpha) { + throw new RuntimeException('Layer has not been initialized.'); + } + + // Reshape alpha vector [width] to column [width, 1] for broadcasting + $alphaCol = NumPower::reshape($this->alpha->param(), [$this->width(), 1]); + + $positivePart = NumPower::greater($input, 0.0); + + $negativePart = NumPower::multiply( + NumPower::lessEqual($input, 0.0), + $alphaCol, + ); + + return NumPower::add($positivePart, $negativePart); + } + + /** + * Return the string representation of the object. 
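+     * Example: 'PReLU (initializer: Constant (value: 0.25))' with the default initializer.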
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "PReLU (initializer: {$this->initializer})"; + } +} diff --git a/tests/NeuralNet/Layers/PReLU/PReLUTest.php b/tests/NeuralNet/Layers/PReLU/PReLUTest.php new file mode 100644 index 000000000..a1193ea09 --- /dev/null +++ b/tests/NeuralNet/Layers/PReLU/PReLUTest.php @@ -0,0 +1,291 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new PReLU(new Constant(0.25)); + + srand(self::RANDOM_SEED); + } + + /** + * @return array + */ + public static function initializeProvider() : array + { + return [ + 'fanIn=3' => [3], + ]; + } + + /** + * @return array + */ + public static function forwardProvider() : array + { + return [ + 'expectedForward' => [[ + [1.0, 2.5, -0.025], + [0.1, 0.0, 3.0], + [0.002, -1.5, -0.125], + ]], + ]; + } + + /** + * @return array + */ + public static function backProvider() : array + { + return [ + 'expectedGradient' => [[ + [0.25, 0.6999999, 0.0250010], + [0.5, 0.05, 0.01], + [0.25, 0.0251045, 0.2234300], + ]], + ]; + } + + /** + * @return array + */ + public static function gradientProvider() : array + { + return [ + 'expectedGradient' => [[ + [0.25, 0.7, 0.025], + [0.5, 0.05, 0.01], + [0.25, 0.025, 0.2225], + ]], + ]; + } + + /** + * @return array + */ + public static function inferProvider() : array + { + return [ + 'expectedInfer' => [[ + [1.0, 2.5, -0.0250000], + [0.1, 0.0, 3.0], + [0.0020000, -1.5, -0.125], + ]], + ]; + } + + /** + * @return array + */ + public static function activateProvider() : array + { + return [ + 'defaultInput' => [ + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ], + [ + [1.0, 2.5, -0.025], + [0.1, 0.0, 3.0], + [0.002, -1.5, -0.125], + ], + ], + ]; + } + + /** + * @return array + */ + public static function differentiateProvider() : array + { + return [ + 'defaultInput' => [ + [ + [1.0, 2.5, -0.1], + [0.1, 0.0, 3.0], + [0.002, -6.0, -0.5], + ], + [ + [1.0, 1.0, 0.25], + [1.0, 0.25, 1.0], + [1.0, 0.25, 0.25], + ], + ], + ]; + } + + #[Test] + #[TestDox('Can be cast to a string')] + public function testToString() : void + { + self::assertEquals('PReLU (initializer: Constant (value: 0.25))', (string) $this->layer); + } + + #[Test] + #[TestDox('Initializes width equal to fan-in')] + public function testInitializeSetsWidth() : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Initializes and returns fan out equal to fan-in')] + #[DataProvider('initializeProvider')] + public function testInitializeReturnsFanOut(int $fanIn) : void + { + $fanOut = $this->layer->initialize($fanIn); + + self::assertEquals($fanIn, $fanOut); + self::assertEquals($fanIn, $this->layer->width()); + } + + #[Test] + #[TestDox('Computes forward activations')] + #[DataProvider('forwardProvider')] + public function testForward(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($expected, $forward->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Backpropagates and returns gradient for previous layer')] + #[DataProvider('backProvider')] + public function testBack(array $expected) : 
void + { + $this->layer->initialize($this->fanIn); + + // Forward pass to set internal input state + $this->layer->forward($this->input); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes gradient for previous layer directly')] + #[DataProvider('gradientProvider')] + public function testGradient(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $gradient = $this->layer->gradient( + $this->input, + ($this->prevGrad)(), + ); + + self::assertEqualsWithDelta($expected, $gradient->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Computes inference activations')] + #[DataProvider('inferProvider')] + public function testInfer(array $expected) : void + { + $this->layer->initialize($this->fanIn); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($expected, $infer->toArray(), 1e-7); + } + + #[Test] + #[TestDox('Yields trainable alpha parameter')] + public function testParameters() : void + { + $this->layer->initialize($this->fanIn); + + $params = iterator_to_array($this->layer->parameters()); + + self::assertArrayHasKey('alpha', $params); + self::assertInstanceOf(TrainableParameter::class, $params['alpha']); + } + + #[Test] + #[TestDox('Restores alpha parameter from array')] + public function testRestore() : void + { + $this->layer->initialize($this->fanIn); + + $alphaNew = new TrainableParameter(NumPower::full([$this->fanIn], 0.5)); + + $this->layer->restore([ + 'alpha' => $alphaNew, + ]); + + $restored = iterator_to_array($this->layer->parameters()); + + self::assertSame($alphaNew, $restored['alpha']); + self::assertEquals( + array_fill(0, $this->fanIn, 0.5), + $restored['alpha']->param()->toArray(), + ); + } +} From f83fed6a592247c122db35a997e990cb4e7775fc Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Wed, 24 Dec 2025 20:06:37 +0200 Subject: [PATCH 21/23] ML-392 Added `Swish` output layer with complete interface and unit tests --- docs/neural-network/hidden-layers/swish.md | 6 +- src/NeuralNet/Layers/Swish/Swish.php | 303 +++++++++++++++++++++ tests/NeuralNet/Layers/Swish/SwishTest.php | 202 ++++++++++++++ 3 files changed, 508 insertions(+), 3 deletions(-) create mode 100644 src/NeuralNet/Layers/Swish/Swish.php create mode 100644 tests/NeuralNet/Layers/Swish/SwishTest.php diff --git a/docs/neural-network/hidden-layers/swish.md b/docs/neural-network/hidden-layers/swish.md index e91138566..29e6677f7 100644 --- a/docs/neural-network/hidden-layers/swish.md +++ b/docs/neural-network/hidden-layers/swish.md @@ -1,4 +1,4 @@ -[source] +[source] # Swish Swish is a parametric activation layer that utilizes smooth rectified activation functions. The trainable *beta* parameter allows each activation function in the layer to tailor its output to the training set by interpolating between the linear function and ReLU. 
@@ -10,8 +10,8 @@ Swish is a parametric activation layer that utilizes smooth rectified activation ## Example ```php -use Rubix\ML\NeuralNet\Layers\Swish; -use Rubix\ML\NeuralNet\Initializers\Constant; +use Rubix\ML\NeuralNet\Layers\Swish\Swish; +use Rubix\ML\NeuralNet\Initializers\Constant\Constant; $layer = new Swish(new Constant(1.0)); ``` diff --git a/src/NeuralNet/Layers/Swish/Swish.php b/src/NeuralNet/Layers/Swish/Swish.php new file mode 100644 index 000000000..fcb00fa44 --- /dev/null +++ b/src/NeuralNet/Layers/Swish/Swish.php @@ -0,0 +1,303 @@ + + */ +class Swish implements Hidden, Parametric +{ + /** + * The initializer of the beta parameter. + * + * @var Initializer + */ + protected Initializer $initializer; + + /** + * The sigmoid activation function. + * + * @var Sigmoid + */ + protected Sigmoid $sigmoid; + + /** + * The width of the layer. + * + * @var positive-int|null + */ + protected ?int $width = null; + + /** + * The parameterized scaling factors. + * + * @var Parameter|null + */ + protected ?Parameter $beta = null; + + /** + * The memoized input matrix. + * + * @var NDArray|null + */ + protected ?NDArray $input = null; + + /** + * The memorized activation matrix. + * + * @var NDArray|null + */ + protected ?NDArray $output = null; + + /** + * @param Initializer|null $initializer + */ + public function __construct(?Initializer $initializer = null) + { + $this->initializer = $initializer ?? new Constant(1.0); + $this->sigmoid = new Sigmoid(); + } + + /** + * Return the width of the layer. + * + * @internal + * + * @throws RuntimeException + * @return positive-int + */ + public function width() : int + { + if ($this->width === null) { + throw new RuntimeException('Layer has not been initialized.'); + } + + return $this->width; + } + + /** + * Initialize the layer with the fan in from the previous layer and return + * the fan out for this layer. + * + * @internal + * + * @param positive-int $fanIn + * @return positive-int + */ + public function initialize(int $fanIn) : int + { + $fanOut = $fanIn; + + // Initialize beta as a vector of length fanOut (one beta per neuron) + // Using shape [fanOut, 1] then flattening to [fanOut] + $betaMat = $this->initializer->initialize(1, $fanOut); + $beta = NumPower::flatten($betaMat); + + $this->width = $fanOut; + $this->beta = new Parameter($beta); + + return $fanOut; + } + + /** + * Compute a forward pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function forward(NDArray $input) : NDArray + { + $this->input = $input; + + $this->output = $this->activate($input); + + return $this->output; + } + + /** + * Compute an inferential pass through the layer. + * + * @internal + * + * @param NDArray $input + * @return NDArray + */ + public function infer(NDArray $input) : NDArray + { + return $this->activate($input); + } + + /** + * Calculate the gradient and update the parameters of the layer. 
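+     * As implemented below, the per-neuron beta gradient uses the simplified
+     * form dL/dbeta = sum over the batch axis of (dOut * input), and the
+     * gradient handed back to the previous layer is dOut multiplied by the
+     * derivative (output / input) * (1 - output) + output, where output is the
+     * memorized Swish activation.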
+ * + * @internal + * + * @param Deferred $prevGradient + * @param Optimizer $optimizer + * @throws RuntimeException + * @return Deferred + */ + public function back(Deferred $prevGradient, Optimizer $optimizer) : Deferred + { + if (!$this->beta) { + throw new RuntimeException('Layer has not been initialized.'); + } + + if (!$this->input or !$this->output) { + throw new RuntimeException('Must perform forward pass before backpropagating.'); + } + + /** @var NDArray $dOut */ + $dOut = $prevGradient(); + + // Gradient of the loss with respect to beta + // dL/dbeta = sum_over_batch(dL/dy * dy/dbeta) + // Here we use a simplified formulation: dL/dbeta ~ sum(dOut * input) + $dBetaFull = NumPower::multiply($dOut, $this->input); + + // Sum over the batch axis (axis = 1) to obtain a gradient vector [width] + $dBeta = NumPower::sum($dBetaFull, axis: 1); + + $this->beta->update($dBeta, $optimizer); + + $input = $this->input; + $output = $this->output; + + $this->input = $this->output = null; + + return new Deferred([$this, 'gradient'], [$input, $output, $dOut]); + } + + /** + * Calculate the gradient for the previous layer. + * + * @internal + * + * @param NDArray $input + * @param NDArray $output + * @param NDArray $dOut + * @return NDArray + */ + public function gradient(NDArray $input, NDArray $output, NDArray $dOut) : NDArray + { + $derivative = $this->differentiate($input, $output); + + return NumPower::multiply($derivative, $dOut); + } + + /** + * Return the parameters of the layer. + * + * @internal + * + * @throws \RuntimeException + * @return Generator + */ + public function parameters() : Generator + { + if (!$this->beta) { + throw new RuntimeException('Layer has not been initialized.'); + } + + yield 'beta' => $this->beta; + } + + /** + * Restore the parameters in the layer from an associative array. + * + * @internal + * + * @param Parameter[] $parameters + */ + public function restore(array $parameters) : void + { + $this->beta = $parameters['beta']; + } + + /** + * Compute the Swish activation function and return a matrix. + * + * @param NDArray $input + * @throws RuntimeException + * @return NDArray + */ + protected function activate(NDArray $input) : NDArray + { + if (!$this->beta) { + throw new RuntimeException('Layer has not been initialized.'); + } + + // Reshape beta vector [width] to column [width, 1] for broadcasting + $betaCol = NumPower::reshape($this->beta->param(), [$this->width(), 1]); + + $zHat = NumPower::multiply($betaCol, $input); + + $activated = $this->sigmoid->activate($zHat); + + return NumPower::multiply($activated, $input); + } + + /** + * Calculate the derivative of the activation function at a given output. + * + * @param NDArray $input + * @param NDArray $output + * @throws RuntimeException + * @return NDArray + */ + protected function differentiate(NDArray $input, NDArray $output) : NDArray + { + if (!$this->beta) { + throw new RuntimeException('Layer has not been initialized.'); + } + + // Original formulation: + // derivative = (output / input) * (1 - output) + output + // Implemented using NumPower operations to avoid explicit ones matrix. + $term1 = NumPower::divide($output, $input); + $oneMinusOutput = NumPower::subtract(1.0, $output); + + $product = NumPower::multiply($term1, $oneMinusOutput); + + return NumPower::add($product, $output); + } + + /** + * Return the string representation of the object. 
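+     * Example: 'Swish (initializer: Constant (value: 1))' with the default initializer.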
+ * + * @internal + * + * @return string + */ + public function __toString() : string + { + return "Swish (initializer: {$this->initializer})"; + } +} diff --git a/tests/NeuralNet/Layers/Swish/SwishTest.php b/tests/NeuralNet/Layers/Swish/SwishTest.php new file mode 100644 index 000000000..5f8d55503 --- /dev/null +++ b/tests/NeuralNet/Layers/Swish/SwishTest.php @@ -0,0 +1,202 @@ +fanIn = 3; + + $this->input = NumPower::array([ + [1.0, 2.5, -0.1], + [0.1, 0.1, 3.0], + [0.002, -6.0, -0.5], + ]); + + $this->prevGrad = new Deferred(fn: function () : NDArray { + return NumPower::array([ + [0.25, 0.7, 0.1], + [0.50, 0.2, 0.01], + [0.25, 0.1, 0.89], + ]); + }); + + $this->optimizer = new Stochastic(0.001); + + $this->layer = new Swish(new Constant(1.0)); + } + + /** + * @return array>>> + */ + public static function initializeForwardBackInferProvider() : array + { + return [ + [ + 'forwardExpected' => [ + [0.7310585, 2.3103545, -0.0475020], + [0.0524979, 0.0524979, 2.8577223], + [0.0010009, -0.0148357, -0.1887703], + ], + 'backExpected' => [ + [0.2319176, 0.7695808, 0.0450083], + [0.2749583, 0.1099833, 0.0108810], + [0.1252499, -0.0012326, 0.2314345], + ], + 'inferExpected' => [ + [0.7306671, 2.3094806, -0.0475070], + [0.0524976, 0.0524976, 2.8576817], + [0.0010010, -0.0147432, -0.1887089], + ], + ], + ]; + } + + /** + * @return array + */ + public static function toStringProvider() : array + { + return [ + 'value one' => [1.0, 'Swish (initializer: Constant (value: 1))'], + 'value zero' => [0.0, 'Swish (initializer: Constant (value: 0))'], + ]; + } + + #[DataProvider('initializeForwardBackInferProvider')] + public function testInitializeForwardBackInfer( + array $forwardExpected, + array $backExpected, + array $inferExpected, + ) : void + { + $this->layer->initialize($this->fanIn); + + self::assertEquals($this->fanIn, $this->layer->width()); + + $forward = $this->layer->forward($this->input); + + self::assertEqualsWithDelta($forwardExpected, $forward->toArray(), 1e-7); + + $gradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + self::assertInstanceOf(NDArray::class, $gradient); + self::assertEqualsWithDelta($backExpected, $gradient->toArray(), 1e-7); + + $infer = $this->layer->infer($this->input); + + self::assertEqualsWithDelta($inferExpected, $infer->toArray(), 1e-7); + } + + #[DataProvider('toStringProvider')] + public function testToString(float $value, string $expected) : void + { + $layer = new Swish(new Constant($value)); + + self::assertSame($expected, (string) $layer); + } + + public function testWidthThrowsIfNotInitialized() : void + { + $layer = new Swish(); + + $this->expectException(RuntimeException::class); + $this->expectExceptionMessage('Layer has not been initialized.'); + + $layer->width(); + } + + public function testInitializeReturnsFanOutAndSetsWidth() : void + { + $fanIn = 4; + $layer = new Swish(new Constant(1.0)); + + $fanOut = $layer->initialize($fanIn); + + self::assertSame($fanIn, $fanOut); + self::assertSame($fanIn, $layer->width()); + } + + public function testParametersAndRestore() : void + { + $this->layer->initialize($this->fanIn); + + $parameters = iterator_to_array($this->layer->parameters()); + + self::assertArrayHasKey('beta', $parameters); + self::assertInstanceOf(Parameter::class, $parameters['beta']); + + $betaParam = $parameters['beta']; + $originalBeta = $betaParam->param()->toArray(); + + $newLayer = new Swish(new Constant(0.0)); + $newLayer->initialize($this->fanIn); + + 
$newLayer->restore($parameters); + + $restoredParams = iterator_to_array($newLayer->parameters()); + + self::assertArrayHasKey('beta', $restoredParams); + self::assertInstanceOf(Parameter::class, $restoredParams['beta']); + + $restoredBeta = $restoredParams['beta']->param()->toArray(); + + self::assertEquals($originalBeta, $restoredBeta); + } + + public function testGradientMatchesBackpropagatedGradient() : void + { + $this->layer->initialize($this->fanIn); + + $output = $this->layer->forward($this->input); + + $backGradient = $this->layer->back( + prevGradient: $this->prevGrad, + optimizer: $this->optimizer + )->compute(); + + $directGradient = $this->layer->gradient( + $this->input, + $output, + ($this->prevGrad)() + ); + + self::assertInstanceOf(NDArray::class, $directGradient); + self::assertEqualsWithDelta($backGradient->toArray(), $directGradient->toArray(), 1e-7); + } +} From be52a098de8cb75475d1c6c05744219a03375e64 Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Wed, 24 Dec 2025 20:16:43 +0200 Subject: [PATCH 22/23] ML-392 Fixed syntax issues in tests annotations --- tests/Helpers/GraphvizTest.php | 9 ++-- tests/NeuralNet/FeedForwardTest.php | 52 ++++++++----------- .../SamplesAreCompatibleWithDistanceTest.php | 8 +-- 3 files changed, 32 insertions(+), 37 deletions(-) diff --git a/tests/Helpers/GraphvizTest.php b/tests/Helpers/GraphvizTest.php index da1c70b99..3bef96a06 100644 --- a/tests/Helpers/GraphvizTest.php +++ b/tests/Helpers/GraphvizTest.php @@ -6,6 +6,8 @@ use PHPUnit\Framework\Attributes\CoversClass; use PHPUnit\Framework\Attributes\Group; +use PHPUnit\Framework\Attributes\Test; +use PHPUnit\Framework\Attributes\TestDox; use Rubix\ML\Encoding; use Rubix\ML\Helpers\Graphviz; use PHPUnit\Framework\TestCase; @@ -14,9 +16,8 @@ #[CoversClass(GraphvizTest::class)] class GraphvizTest extends TestCase { - /** - * @test - */ + #[Test] + #[TestDox('Converts a DOT graph description to an image encoding')] public function dotToImage() : void { // Almost always skip this test, needed to appease Stan. 
@@ -56,6 +57,6 @@ public function dotToImage() : void $encoding = Graphviz::dotToImage($dot, 'png'); - $this->assertInstanceOf(Encoding::class, $encoding); + self::assertInstanceOf(Encoding::class, $encoding); } } diff --git a/tests/NeuralNet/FeedForwardTest.php b/tests/NeuralNet/FeedForwardTest.php index c68ae47be..a060975e0 100644 --- a/tests/NeuralNet/FeedForwardTest.php +++ b/tests/NeuralNet/FeedForwardTest.php @@ -14,11 +14,14 @@ use Rubix\ML\NeuralNet\ActivationFunctions\ReLU; use Rubix\ML\NeuralNet\CostFunctions\CrossEntropy; use PHPUnit\Framework\TestCase; - -/** - * @group NeuralNet - * @covers \Rubix\ML\NeuralNet\FeedForward - */ +use PHPUnit\Framework\Attributes\Before; +use PHPUnit\Framework\Attributes\CoversClass; +use PHPUnit\Framework\Attributes\Group; +use PHPUnit\Framework\Attributes\Test; +use PHPUnit\Framework\Attributes\TestDox; + +#[Group('NeuralNet')] +#[CoversClass(FeedForward::class)] class FeedForwardTest extends TestCase { /** @@ -46,9 +49,7 @@ class FeedForwardTest extends TestCase */ protected $output; - /** - * @before - */ + #[Before] protected function setUp() : void { $this->dataset = Labeled::quick([ @@ -72,50 +73,44 @@ protected function setUp() : void $this->network = new FeedForward($this->input, $this->hidden, $this->output, new Adam(0.001)); } - /** - * @test - */ + #[Test] + #[TestDox('Builds a feed-forward network instance')] public function build() : void { $this->assertInstanceOf(FeedForward::class, $this->network); $this->assertInstanceOf(Network::class, $this->network); } - /** - * @test - */ + #[Test] + #[TestDox('Returns all hidden and output layers')] public function layers() : void { $this->assertCount(5, iterator_to_array($this->network->layers())); } - /** - * @test - */ + #[Test] + #[TestDox('Returns the input layer')] public function input() : void { $this->assertInstanceOf(Placeholder1D::class, $this->network->input()); } - /** - * @test - */ + #[Test] + #[TestDox('Returns the hidden layers')] public function hidden() : void { $this->assertCount(5, $this->network->hidden()); } - /** - * @test - */ + #[Test] + #[TestDox('Returns the output layer')] public function networkOutput() : void { $this->assertInstanceOf(Output::class, $this->network->output()); } - /** - * @test - */ + #[Test] + #[TestDox('Reports the correct number of parameters after initialization')] public function numParams() : void { $this->network->initialize(); @@ -123,9 +118,8 @@ public function numParams() : void $this->assertEquals(103, $this->network->numParams()); } - /** - * @test - */ + #[Test] + #[TestDox('Performs a roundtrip pass and returns a loss value')] public function roundtrip() : void { $this->network->initialize(); diff --git a/tests/Specifications/SamplesAreCompatibleWithDistanceTest.php b/tests/Specifications/SamplesAreCompatibleWithDistanceTest.php index 885eb5d7b..7b564fbd2 100644 --- a/tests/Specifications/SamplesAreCompatibleWithDistanceTest.php +++ b/tests/Specifications/SamplesAreCompatibleWithDistanceTest.php @@ -7,6 +7,8 @@ use PHPUnit\Framework\Attributes\CoversClass; use PHPUnit\Framework\Attributes\DataProvider; use PHPUnit\Framework\Attributes\Group; +use PHPUnit\Framework\Attributes\Test; +use PHPUnit\Framework\Attributes\TestDox; use Rubix\ML\Datasets\Unlabeled; use Rubix\ML\Kernels\Distance\Hamming; use Rubix\ML\Kernels\Distance\Euclidean; @@ -61,11 +63,9 @@ public static function passesProvider() : Generator ]; } - /** - * @param SamplesAreCompatibleWithDistance $specification - * @param bool $expected - */ 
#[DataProvider('passesProvider')] + #[Test] + #[TestDox('Checks whether samples are compatible with the given distance metric')] public function passes(SamplesAreCompatibleWithDistance $specification, bool $expected) : void { $this->assertSame($expected, $specification->passes()); From 0b591eda5bd4ea3a14d33c70f1fb28109c21afcb Mon Sep 17 00:00:00 2001 From: Samuel Akopyan Date: Sun, 4 Jan 2026 19:46:05 +0200 Subject: [PATCH 23/23] ML-392 Typo fixes --- docs/neural-network/optimizers/cyclical.md | 2 +- src/NeuralNet/Optimizers/Cyclical.php | 2 +- src/NeuralNet/Optimizers/Cyclical/Cyclical.php | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/neural-network/optimizers/cyclical.md b/docs/neural-network/optimizers/cyclical.md index eed8b2779..02622461b 100644 --- a/docs/neural-network/optimizers/cyclical.md +++ b/docs/neural-network/optimizers/cyclical.md @@ -11,7 +11,7 @@ $$ \text{cycle} &= \left\lfloor 1 + \frac{t}{2\,\text{steps}} \right\rfloor \\ x &= \left| \frac{t}{\text{steps}} - 2\,\text{cycle} + 1 \right| \\ \text{scale} &= \text{decay}^{\,t} \\ -\eta_t &= \text{lower} + (\text{upper} - \text{lower})\,\max\bigl(0\,1 - x\bigr)\,\text{scale} \\ +\eta_t &= \text{lower} + (\text{upper} - \text{lower})\,\max\bigl(0,1 - x\bigr)\,\text{scale} \\ \Delta\theta_t &= \eta_t\,g_t \end{aligned} $$ diff --git a/src/NeuralNet/Optimizers/Cyclical.php b/src/NeuralNet/Optimizers/Cyclical.php index dcce49bf2..606228611 100644 --- a/src/NeuralNet/Optimizers/Cyclical.php +++ b/src/NeuralNet/Optimizers/Cyclical.php @@ -86,7 +86,7 @@ public function __construct( if ($lower > $upper) { throw new InvalidArgumentException('Lower bound cannot be' - . ' reater than the upper bound.'); + . ' greater than the upper bound.'); } if ($losses < 1) { diff --git a/src/NeuralNet/Optimizers/Cyclical/Cyclical.php b/src/NeuralNet/Optimizers/Cyclical/Cyclical.php index ac22d9d52..ca929cdeb 100644 --- a/src/NeuralNet/Optimizers/Cyclical/Cyclical.php +++ b/src/NeuralNet/Optimizers/Cyclical/Cyclical.php @@ -90,7 +90,7 @@ public function __construct( if ($lower > $upper) { throw new InvalidArgumentException( - 'Lower bound cannot be reater than the upper bound.' + 'Lower bound cannot be greater than the upper bound.' ); }