# Source code for lightonml.projections.torch

# -*- coding: utf-8 -*-
import warnings

import torch.nn as nn

from lightonopu import OPU
from lightonopu.simulated_device import SimulatedOpuDevice


class OPUMap(nn.Module):
    """Adapter of the OPU to the Pytorch interface.

    Forward method is mapped to transform1d or transform2d of the OPU
    class, depending on the ndims parameter at the construction.
    @see lightonopu.opu.OPU

    Parameters
    ----------
    n_components: int,
        dimensionality of the target projection space.
    opu : lightonopu.opu.OPU,
        optical processing unit instance
    ndims : int,
        number of dimensions of an input. Can be 1 or 2.
        if ndims is 1, transform accepts 1d vector or batch of 1d vectors.
        if ndims is 2, transform accepts 2d vector or batch of 2d vectors.
    packed: bool, optional
        whether the input data is in bit-packed representation.
        If packed is True and ndims is 2, each input vector is assumed to
        be a 1d array, and the "real" number of features must be provided
        using the n_2d_features parameter. Defaults to False.
    n_2d_features: list(int) or tuple(int) or np.ndarray (optional)
        number of 2d features if the input is packed
    simulated: bool, default False,
        use real or simulated OPU
    max_n_features: int, optional
        maximum number of binary features that the OPU will transform;
        used only if simulated=True, in order to initiate the random matrix
    verbose_level: int, optional
        0, 1 or 2. 0 = no messages, 1 = most messages, and 2 = messages
        from OPU device (very verbose).

    Attributes
    ----------
    opu : lightonopu.opu.OPU,
        optical processing unit instance
    n_components : int,
        dimensionality of the target projection space
        (a property delegated to ``opu.n_components``).
    ndims : int,
        number of dimensions of an input (1 or 2).
    packed: bool,
        whether the input data is in bit-packed representation.
    n_2d_features: list(int) or tuple(int) or np.ndarray,
        number of 2d features if the input is packed.
    simulated: bool,
        whether a simulated OPU is used.
    max_n_features: int,
        maximum number of binary features (simulated OPU only).
    """

    def __init__(self, n_components, opu=None, ndims=1, n_2d_features=None,
                 packed=False, simulated=False, max_n_features=None,
                 verbose_level=0):
        super(OPUMap, self).__init__()
        if opu is None:
            if simulated:
                simulated_opu = SimulatedOpuDevice()
                # The simulated device draws a fixed random matrix, so its
                # input width must be known up front.
                if max_n_features is None:
                    raise ValueError("When using simulated=True, you need to provide max_n_features.")
                self.opu = OPU(opu_device=simulated_opu,
                               max_n_features=max_n_features,
                               n_components=n_components,
                               verbose_level=verbose_level)
            else:
                self.opu = OPU(n_components=n_components,
                               verbose_level=verbose_level)
        else:
            # Caller supplied an OPU instance: adopt it, but warn when the
            # simulated flag contradicts the kind of device provided.
            self.opu = opu
            self.opu.n_components = n_components
            if simulated and not isinstance(opu, SimulatedOpuDevice):
                warnings.warn("You provided a real OPU object but set simulated=True."
                              " Will use the real OPU.")
            if isinstance(opu, SimulatedOpuDevice) and not simulated:
                warnings.warn("You provided a simulated OPU object but set simulated=False. "
                              "Will use simulated OPU.")

        # Goes through the property setter, keeping self and opu in sync.
        self.n_components = self.opu.n_components
        if ndims not in [1, 2]:
            raise ValueError("Number of input dimensions must be 1 or 2")
        self.ndims = ndims
        self.n_2d_features = n_2d_features
        self.packed = packed
        self.simulated = simulated
        self.max_n_features = max_n_features

        # Bind the transform matching the declared input dimensionality.
        if self.ndims == 1:
            self.transform = lambda X: self.opu.transform1d(X, self.packed)
        if self.ndims == 2:
            self.transform = lambda X: self.opu.transform2d(X, self.packed,
                                                            self.n_2d_features)
        print("OPU output is detached from the computational graph.")

    @property
    def n_components(self):
        # Delegated to the underlying OPU so the two can never disagree.
        return self.opu.n_components

    @n_components.setter
    def n_components(self, value):
        self.opu.n_components = value

    def forward(self, input):
        """Performs the nonlinear random projections.

        @see lightonopu.opu.transform
        """
        output = self.transform(input)
        # The OPU is not differentiable: detach its output from the
        # autograd graph (see the notice printed at construction).
        return output.detach()

    def extra_repr(self):
        # BUG FIX: the original read self.n_dims, but the attribute set in
        # __init__ is self.ndims — repr()/print() of the module raised
        # AttributeError.
        return 'out_features={}, n_dims={}, packed={} simulated={}'.format(
            self.n_components, self.ndims, self.packed, self.simulated
        )