diff --git a/README.md b/README.md
index 73ff40347..133f1e248 100644
--- a/README.md
+++ b/README.md
@@ -193,7 +193,6 @@ To cite Concrete ML, notably in academic papers, please use the following entry,
-
## License.
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions, please contact us at hello@zama.ai.
diff --git a/docs/conf.py b/docs/conf.py
index 4a59c28e5..ddcd4fb27 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -27,7 +27,7 @@
root_url = root_url if root_url.endswith("/") else root_url + "/"
# The full version, including alpha/beta/rc tags
-release = "1.4.0"
+release = "1.4.1"
# -- General configuration ---------------------------------------------------
diff --git a/docs/developer-guide/api/concrete.ml.onnx.ops_impl.md b/docs/developer-guide/api/concrete.ml.onnx.ops_impl.md
index e12e5bece..f71c2a3ec 100644
--- a/docs/developer-guide/api/concrete.ml.onnx.ops_impl.md
+++ b/docs/developer-guide/api/concrete.ml.onnx.ops_impl.md
@@ -1333,10 +1333,12 @@ ______________________________________________________________________
```python
numpy_avgpool(
x: ndarray,
- ceil_mode: int,
kernel_shape: Tuple[int, ],
- pads: Tuple[int, ] = None,
- strides: Tuple[int, ] = None
+ auto_pad: str = 'NOTSET',
+ ceil_mode: int = 0,
+ count_include_pad: int = 1,
+ pads: Optional[Tuple[int, ]] = None,
+ strides: Optional[Tuple[int, ]] = None
) → Tuple[ndarray]
```
@@ -1348,24 +1350,22 @@ See: https://github.com/onnx/onnx/blob/main/docs/Operators.md#AveragePool
**Args:**
-- `x` (numpy.ndarray): input data (many dtypes are supported). Shape is N x C x H x W for 2d
-- `ceil_mode` (int): ONNX rounding parameter, expected 0 (torch style dimension computation)
-- `kernel_shape` (Tuple\[int, ...\]): shape of the kernel. Should have 2 elements for 2d conv
-- `pads` (Tuple\[int, ...\]): padding in ONNX format (begin, end) on each axis
-- `strides` (Tuple\[int, ...\]): stride of the convolution on each axis
+- `x` (numpy.ndarray): Input data of shape (N, C, H, W), as only 2D inputs are currently supported.
+- `kernel_shape` (Tuple\[int, ...\]): The size of the kernel along each axis. Currently, only 2D kernels are supported.
+- `auto_pad` (str): Only the default "NOTSET" value is currently supported, which means explicit padding is used.
+- `ceil_mode` (int): Whether to use ONNX's ceil (1) or floor (0, the default) to compute the output shape.
+- `count_include_pad` (int): Whether to include pad pixels when calculating values for the edges. Currently, setting this parameter to 0 is not supported in Concrete ML.
+- `pads` (Tuple\[int, ...\]): Padding for the beginning and ending along each spatial axis. Expected format is \[x1_begin, x2_begin, ..., x1_end, x2_end, ...\] where xi_begin (resp. xi_end) is the number of pixels added at the beginning (resp. end) of axis `i`.
+- `strides` (Tuple\[int, ...\]): Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.
**Returns:**
- `res` (numpy.ndarray): a tensor of size (N x InChannels x OutHeight x OutWidth).
See https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html
-**Raises:**
-
-- `AssertionError`: if the pooling arguments are wrong
-
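A minimal sketch of a call matching the signature above (the import path is the module documented here; shapes and values are made up for illustration):

```python
import numpy

from concrete.ml.onnx.ops_impl import numpy_avgpool

# A single one-channel 4x4 image, in the expected (N, C, H, W) layout
x = numpy.arange(16, dtype=numpy.float64).reshape(1, 1, 4, 4)

# 2x2 average pooling with stride 2 and explicit zero padding;
# the function returns a one-element tuple
(result,) = numpy_avgpool(
    x,
    kernel_shape=(2, 2),
    pads=(0, 0, 0, 0),  # (x1_begin, x2_begin, x1_end, x2_end)
    strides=(2, 2),
)

print(result.shape)  # (1, 1, 2, 2)
```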
______________________________________________________________________
-
+
## function `numpy_maxpool`
@@ -1406,7 +1406,7 @@ See: https://github.com/onnx/onnx/blob/main/docs/Operators.md#MaxPool
______________________________________________________________________
-
+
## function `numpy_cast`
@@ -1431,7 +1431,7 @@ See: https://github.com/onnx/onnx/blob/main/docs/Operators.md#Cast
______________________________________________________________________
-
+
## function `numpy_batchnorm`
@@ -1473,7 +1473,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#BatchNormalization-
______________________________________________________________________
-
+
## function `numpy_flatten`
@@ -1496,7 +1496,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Flatten-13.
______________________________________________________________________
-
+
## function `numpy_or`
@@ -1519,7 +1519,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Or-7
______________________________________________________________________
-
+
## function `numpy_or_float`
@@ -1542,7 +1542,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Or-7
______________________________________________________________________
-
+
## function `numpy_round`
@@ -1564,7 +1564,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Round-11 Remark tha
______________________________________________________________________
-
+
## function `numpy_pow`
@@ -1587,7 +1587,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Pow-13
______________________________________________________________________
-
+
## function `numpy_floor`
@@ -1609,7 +1609,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Floor-1
______________________________________________________________________
-
+
## function `numpy_max`
@@ -1634,7 +1634,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Max-1
______________________________________________________________________
-
+
## function `numpy_min`
@@ -1659,7 +1659,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Max-1
______________________________________________________________________
-
+
## function `numpy_sign`
@@ -1681,7 +1681,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Sign-9
______________________________________________________________________
-
+
## function `numpy_neg`
@@ -1703,7 +1703,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Sign-9
______________________________________________________________________
-
+
## function `numpy_concatenate`
diff --git a/docs/developer-guide/api/concrete.ml.quantization.base_quantized_op.md b/docs/developer-guide/api/concrete.ml.quantization.base_quantized_op.md
index b82818135..faecefe79 100644
--- a/docs/developer-guide/api/concrete.ml.quantization.base_quantized_op.md
+++ b/docs/developer-guide/api/concrete.ml.quantization.base_quantized_op.md
@@ -15,7 +15,7 @@ Base Quantized Op class that implements quantization for a float numpy op.
______________________________________________________________________
-
+
## class `QuantizedOp`
@@ -29,7 +29,7 @@ Base class for quantized ONNX ops implemented in numpy.
- `constant_inputs` (Optional\[Union\[Dict\[str, Any\], Dict\[int, Any\]\]\]): The constant tensors that are inputs to this op
- `input_quant_opts` (QuantizationOptions): Input quantizer options, determine the quantization that is applied to input tensors (that are not constants)
-
+
### method `__init__`
@@ -56,7 +56,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `calibrate`
@@ -76,7 +76,7 @@ Create corresponding QuantizedArray for the output of the activation function.
______________________________________________________________________
-
+
### method `call_impl`
@@ -97,7 +97,7 @@ Call self.impl to centralize mypy bug workaround.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -115,7 +115,7 @@ This function shall be overloaded by inheriting classes to test self.\_int_input
______________________________________________________________________
-
+
### method `dump`
@@ -131,7 +131,7 @@ Dump itself to a file.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -147,7 +147,7 @@ Dump itself to a dict.
______________________________________________________________________
-
+
### method `dumps`
@@ -163,7 +163,7 @@ Dump itself to a string.
______________________________________________________________________
-
+
### method `load_dict`
@@ -183,7 +183,7 @@ Load itself from a string.
______________________________________________________________________
-
+
### classmethod `must_quantize_input`
@@ -205,7 +205,7 @@ Quantized ops and numpy onnx ops take inputs and attributes. Inputs can be eithe
______________________________________________________________________
-
+
### classmethod `op_type`
@@ -221,7 +221,7 @@ Get the type of this operation.
______________________________________________________________________
-
+
### method `prepare_output`
@@ -243,15 +243,15 @@ The calibrate method needs to be called with sample data before using this funct
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Execute the quantized forward.
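To make the widened signature concrete, here is a schematic sketch of the input kinds a `q_impl` override must now accept. This is not the library's implementation, only an illustration of the contract:

```python
from typing import Union

import numpy

from concrete.ml.quantization.quantizers import QuantizedArray

QImplInput = Union[numpy.ndarray, QuantizedArray, None, bool, int, float]


def describe_q_inputs(*q_inputs: QImplInput) -> None:
    """Branch on the input kinds that q_impl-style methods can receive."""
    for q_input in q_inputs:
        if isinstance(q_input, QuantizedArray):
            print("quantized tensor, qvalues shape:", q_input.qvalues.shape)
        elif isinstance(q_input, numpy.ndarray):
            print("raw ndarray, shape:", q_input.shape)
        else:
            # None or a plain Python scalar (bool, int, float)
            print("scalar or None:", q_input)
```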
@@ -267,7 +267,7 @@ Execute the quantized forward.
______________________________________________________________________
-
+
## class `QuantizedOpUnivariateOfEncrypted`
@@ -275,7 +275,7 @@ An univariate operator of an encrypted value.
This operation is not really operating as a quantized operation. It is useful when the computations get fused into a TLU, as in e.g., Act(x) = x || (x + 42)).
-
+
### method `__init__`
@@ -302,7 +302,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `calibrate`
@@ -322,7 +322,7 @@ Create corresponding QuantizedArray for the output of the activation function.
______________________________________________________________________
-
+
### method `call_impl`
@@ -343,7 +343,7 @@ Call self.impl to centralize mypy bug workaround.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -361,7 +361,7 @@ This operation can be fused and computed in float when a single integer tensor g
______________________________________________________________________
-
+
### method `dump`
@@ -377,7 +377,7 @@ Dump itself to a file.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -393,7 +393,7 @@ Dump itself to a dict.
______________________________________________________________________
-
+
### method `dumps`
@@ -409,7 +409,7 @@ Dump itself to a string.
______________________________________________________________________
-
+
### method `load_dict`
@@ -429,7 +429,7 @@ Load itself from a string.
______________________________________________________________________
-
+
### classmethod `must_quantize_input`
@@ -451,7 +451,7 @@ Quantized ops and numpy onnx ops take inputs and attributes. Inputs can be eithe
______________________________________________________________________
-
+
### classmethod `op_type`
@@ -467,7 +467,7 @@ Get the type of this operation.
______________________________________________________________________
-
+
### method `prepare_output`
@@ -489,15 +489,15 @@ The calibrate method needs to be called with sample data before using this funct
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Execute the quantized forward.
@@ -513,7 +513,7 @@ Execute the quantized forward.
______________________________________________________________________
-
+
## class `QuantizedMixingOp`
@@ -521,7 +521,7 @@ An operator that mixes (adds or multiplies) together encrypted inputs.
Mixing operators cannot be fused to TLUs.
-
+
### method `__init__`
@@ -549,7 +549,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `calibrate`
@@ -569,7 +569,7 @@ Create corresponding QuantizedArray for the output of the activation function.
______________________________________________________________________
-
+
### method `call_impl`
@@ -590,7 +590,7 @@ Call self.impl to centralize mypy bug workaround.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -608,7 +608,7 @@ Mixing operations cannot be fused since it must be performed over integer tensor
______________________________________________________________________
-
+
### method `cnp_round`
@@ -634,7 +634,7 @@ Round the input array to the specified number of bits.
______________________________________________________________________
-
+
### method `dump`
@@ -650,7 +650,7 @@ Dump itself to a file.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -666,7 +666,7 @@ Dump itself to a dict.
______________________________________________________________________
-
+
### method `dumps`
@@ -682,7 +682,7 @@ Dump itself to a string.
______________________________________________________________________
-
+
### method `load_dict`
@@ -702,7 +702,7 @@ Load itself from a string.
______________________________________________________________________
-
+
### method `make_output_quant_parameters`
@@ -728,7 +728,7 @@ Build a quantized array from quantized integer results of the op and quantizatio
______________________________________________________________________
-
+
### classmethod `must_quantize_input`
@@ -750,7 +750,7 @@ Quantized ops and numpy onnx ops take inputs and attributes. Inputs can be eithe
______________________________________________________________________
-
+
### classmethod `op_type`
@@ -766,7 +766,7 @@ Get the type of this operation.
______________________________________________________________________
-
+
### method `prepare_output`
@@ -788,15 +788,15 @@ The calibrate method needs to be called with sample data before using this funct
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Execute the quantized forward.
diff --git a/docs/developer-guide/api/concrete.ml.quantization.post_training.md b/docs/developer-guide/api/concrete.ml.quantization.post_training.md
index 12d91cb17..6f4de1017 100644
--- a/docs/developer-guide/api/concrete.ml.quantization.post_training.md
+++ b/docs/developer-guide/api/concrete.ml.quantization.post_training.md
@@ -14,7 +14,7 @@ Post Training Quantization methods.
______________________________________________________________________
-
+
## function `get_n_bits_dict`
@@ -36,7 +36,7 @@ Convert the n_bits parameter into a proper dictionary.
______________________________________________________________________
-
+
## class `ONNXConverter`
@@ -54,7 +54,7 @@ This class should be sub-classed to provide specific calibration and quantizatio
- `numpy_model` (NumpyModule): Model in numpy.
- `rounding_threshold_bits` (int): if not None, every accumulators in the model are rounded down to the given bits of precision
-
+
### method `__init__`
@@ -108,7 +108,7 @@ Get the number of bits to use for the quantization of any constants (usually wei
______________________________________________________________________
-
+
### method `quantize_module`
@@ -130,7 +130,7 @@ Following https://arxiv.org/abs/1712.05877 guidelines.
______________________________________________________________________
-
+
## class `PostTrainingAffineQuantization`
@@ -153,7 +153,7 @@ Create the quantized version of the passed numpy module.
- `QuantizedModule`: A quantized version of the numpy model.
-
+
### method `__init__`
@@ -207,7 +207,7 @@ Get the number of bits to use for the quantization of any constants (usually wei
______________________________________________________________________
-
+
### method `quantize_module`
@@ -229,7 +229,7 @@ Following https://arxiv.org/abs/1712.05877 guidelines.
______________________________________________________________________
-
+
## class `PostTrainingQATImporter`
@@ -237,7 +237,7 @@ Converter of Quantization Aware Training networks.
This class provides specific configuration for QAT networks during ONNX network conversion to Concrete ML computation graphs.
-
+
### method `__init__`
@@ -291,7 +291,7 @@ Get the number of bits to use for the quantization of any constants (usually wei
______________________________________________________________________
-
+
### method `quantize_module`
diff --git a/docs/developer-guide/api/concrete.ml.quantization.quantized_module.md b/docs/developer-guide/api/concrete.ml.quantization.quantized_module.md
index b5ddeb09a..e7643331b 100644
--- a/docs/developer-guide/api/concrete.ml.quantization.quantized_module.md
+++ b/docs/developer-guide/api/concrete.ml.quantization.quantized_module.md
@@ -216,7 +216,7 @@ forward(
*x: ndarray,
fhe: Union[FheMode, str] = ,
debug: bool = False
-) → Union[ndarray, Tuple[ndarray, ], Tuple[Union[Tuple[ndarray, ], ndarray], Dict[str, Dict[Union[int, str], Union[ndarray, QuantizedArray, NoneType]]]]]
+) → Union[ndarray, Tuple[ndarray, ], Tuple[Union[Tuple[ndarray, ], ndarray], Dict[str, Dict[Union[int, str], Union[ndarray, QuantizedArray, NoneType, bool, int, float]]]]]
```
Forward pass with numpy function only on floating points.
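A minimal sketch of both call modes, assuming a toy torch model compiled with `compile_torch_model` (the `debug=True` branch is what motivates the widened return type above):

```python
import numpy
import torch

from concrete.ml.torch.compile import compile_torch_model

model = torch.nn.Sequential(torch.nn.Linear(10, 4), torch.nn.ReLU())
inputset = numpy.random.uniform(size=(100, 10)).astype(numpy.float32)

quantized_module = compile_torch_model(model, inputset, n_bits=4)

x = inputset[:1]

# Plain forward pass in the clear
y = quantized_module.forward(x, fhe="disable")

# Debug mode returns the predictions together with a dictionary of
# intermediate per-op values, hence the extra scalar types in the signature
y_debug, intermediates = quantized_module.forward(x, fhe="disable", debug=True)
```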
diff --git a/docs/developer-guide/api/concrete.ml.quantization.quantized_ops.md b/docs/developer-guide/api/concrete.ml.quantization.quantized_ops.md
index 5ef04d265..406f7b4ab 100644
--- a/docs/developer-guide/api/concrete.ml.quantization.quantized_ops.md
+++ b/docs/developer-guide/api/concrete.ml.quantization.quantized_ops.md
@@ -265,10 +265,10 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
calibrate_rounding: bool = False,
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
@@ -312,10 +312,10 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
calibrate_rounding: bool = False,
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
@@ -364,9 +364,9 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
@@ -485,9 +485,9 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
@@ -534,9 +534,9 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Reshape the input integer encrypted tensor.
@@ -607,10 +607,10 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
calibrate_rounding: bool = False,
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Compute the quantized convolution between two quantized tensors.
@@ -665,27 +665,27 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
calibrate_rounding: bool = False,
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `QuantizedMaxPool`
Quantized Max Pooling op.
-
+
### method `__init__`
@@ -712,7 +712,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -730,26 +730,26 @@ Max Pooling operation can not be fused since it must be performed over integer t
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `QuantizedPad`
Quantized Padding op.
-
+
### method `__init__`
@@ -776,7 +776,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -794,21 +794,21 @@ Pad operation cannot be fused since it must be performed over integer tensors.
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
calibrate_rounding: bool = False,
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `QuantizedWhere`
@@ -816,7 +816,7 @@ Where operator on quantized arrays.
Supports only constants for the results produced on the True/False branches.
-
+
### method `__init__`
@@ -843,7 +843,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedCast`
@@ -863,7 +863,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedGreater`
@@ -871,7 +871,7 @@ Comparison operator >.
Only supports comparison with a constant.
-
+
### method `__init__`
@@ -898,7 +898,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedGreaterOrEqual`
@@ -906,7 +906,7 @@ Comparison operator >=.
Only supports comparison with a constant.
-
+
### method `__init__`
@@ -933,7 +933,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedLess`
@@ -941,7 +941,7 @@ Comparison operator \<.
Only supports comparison with a constant.
-
+
### method `__init__`
@@ -968,7 +968,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedLessOrEqual`
@@ -976,7 +976,7 @@ Comparison operator \<=.
Only supports comparison with a constant.
-
+
### method `__init__`
@@ -1003,7 +1003,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedOr`
@@ -1023,7 +1023,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedDiv`
@@ -1043,7 +1043,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedMul`
@@ -1063,7 +1063,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedSub`
@@ -1107,14 +1107,14 @@ ______________________________________________________________________
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `QuantizedBatchNormalization`
@@ -1132,7 +1132,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedFlatten`
@@ -1150,7 +1150,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1168,15 +1168,15 @@ Flatten operation cannot be fused since it must be performed over integer tensor
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Flatten the input integer encrypted tensor.
@@ -1192,13 +1192,13 @@ Flatten the input integer encrypted tensor.
______________________________________________________________________
-
+
## class `QuantizedReduceSum`
ReduceSum with encrypted input.
-
+
### method `__init__`
@@ -1239,7 +1239,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `calibrate`
@@ -1259,16 +1259,16 @@ Create corresponding QuantizedArray for the output of the activation function.
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
calibrate_rounding: bool = False,
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Sum the encrypted tensor's values along the given axes.
@@ -1285,7 +1285,7 @@ Sum the encrypted tensor's values along the given axes.
______________________________________________________________________
-
+
## class `QuantizedErf`
@@ -1303,7 +1303,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedNot`
@@ -1321,13 +1321,13 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedBrevitasQuant`
Brevitas uniform quantization with encrypted input.
-
+
### method `__init__`
@@ -1370,7 +1370,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `calibrate`
@@ -1390,15 +1390,15 @@ Create corresponding QuantizedArray for the output of Quantization function.
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Quantize values.
@@ -1414,7 +1414,7 @@ Quantize values.
______________________________________________________________________
-
+
## class `QuantizedTranspose`
@@ -1434,7 +1434,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1452,15 +1452,15 @@ Transpose can not be fused since it must be performed over integer tensors as it
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Transpose the input integer encrypted tensor.
@@ -1476,7 +1476,7 @@ Transpose the input integer encrypted tensor.
______________________________________________________________________
-
+
## class `QuantizedFloor`
@@ -1494,7 +1494,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedMax`
@@ -1512,7 +1512,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedMin`
@@ -1530,7 +1530,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedNeg`
@@ -1548,7 +1548,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedSign`
@@ -1566,7 +1566,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
## class `QuantizedUnsqueeze`
@@ -1584,7 +1584,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1602,15 +1602,15 @@ Unsqueeze can not be fused since it must be performed over integer tensors as it
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Unsqueeze the input tensors on a given axis.
@@ -1626,7 +1626,7 @@ Unsqueeze the input tensors on a given axis.
______________________________________________________________________
-
+
## class `QuantizedConcat`
@@ -1644,7 +1644,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1662,15 +1662,15 @@ Concatenation can not be fused since it must be performed over integer tensors a
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Concatenate the input tensors on a given axis.
@@ -1686,7 +1686,7 @@ Concatenate the input tensors on a given axis.
______________________________________________________________________
-
+
## class `QuantizedSqueeze`
@@ -1704,7 +1704,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1722,15 +1722,15 @@ Squeeze can not be fused since it must be performed over integer tensors as it r
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Squeeze the input tensors on a given axis.
@@ -1746,7 +1746,7 @@ Squeeze the input tensors on a given axis.
______________________________________________________________________
-
+
## class `ONNXShape`
@@ -1764,7 +1764,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1782,20 +1782,20 @@ This operation returns the shape of the tensor and thus can not be fused into a
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `ONNXConstantOfShape`
@@ -1813,7 +1813,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1831,7 +1831,7 @@ This operation returns a new encrypted tensor and thus can not be fused.
______________________________________________________________________
-
+
## class `ONNXGather`
@@ -1851,7 +1851,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1869,20 +1869,20 @@ This operation returns values from a tensor and thus can not be fused into a uni
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `ONNXSlice`
@@ -1900,7 +1900,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1918,20 +1918,20 @@ This operation returns values from a tensor and thus can not be fused into a uni
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
______________________________________________________________________
-
+
## class `QuantizedExpand`
@@ -1949,7 +1949,7 @@ Get the names of encrypted integer tensors that are used by this op.
______________________________________________________________________
-
+
### method `can_fuse`
@@ -1967,15 +1967,15 @@ Unsqueeze can not be fused since it must be performed over integer tensors as it
______________________________________________________________________
-
+
### method `q_impl`
```python
q_impl(
- *q_inputs: Optional[ndarray, QuantizedArray],
+ *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float],
**attrs
-) → Union[ndarray, QuantizedArray, NoneType]
+) → Union[ndarray, QuantizedArray, NoneType, bool, int, float]
```
Expand the input tensor to a specified shape.
@@ -1991,7 +1991,7 @@ Expand the input tensor to a specified shape.
______________________________________________________________________
-
+
## class `QuantizedEqual`
@@ -1999,7 +1999,7 @@ Comparison operator ==.
Only supports comparison with a constant.
-
+
### method `__init__`
diff --git a/docs/developer-guide/api/concrete.ml.quantization.quantizers.md b/docs/developer-guide/api/concrete.ml.quantization.quantizers.md
index 367af67b7..c4ed34e4e 100644
--- a/docs/developer-guide/api/concrete.ml.quantization.quantizers.md
+++ b/docs/developer-guide/api/concrete.ml.quantization.quantizers.md
@@ -789,7 +789,7 @@ See https://arxiv.org/abs/1712.05877.
```python
__init__(
n_bits,
- values: 'Optional[ndarray]',
+ values: 'Union[None, float, int, ndarray]',
value_is_float: 'bool' = True,
options: 'Optional[QuantizationOptions]' = None,
stats: 'Optional[MinMaxQuantizationStats]' = None,
@@ -800,23 +800,23 @@ __init__(
______________________________________________________________________
-
+
### method `dequant`
```python
-dequant() → ndarray
+dequant() → Union[ndarray, Tracer]
```
De-quantize self.qvalues.
**Returns:**
-- `numpy.ndarray`: De-quantized values.
+- `Union[numpy.ndarray, Tracer]`: De-quantized values.
______________________________________________________________________
-
+
### method `dump`
@@ -832,7 +832,7 @@ Dump itself to a file.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -848,7 +848,7 @@ Dump itself to a dict.
______________________________________________________________________
-
+
### method `dumps`
@@ -864,7 +864,7 @@ Dump itself to a string.
______________________________________________________________________
-
+
### method `load_dict`
@@ -884,56 +884,58 @@ Load itself from a string.
______________________________________________________________________
-
+
### method `quant`
```python
-quant() → Optional[ndarray]
+quant() → Union[ndarray, Tracer]
```
Quantize self.values.
**Returns:**
-- `numpy.ndarray`: Quantized values.
+- `Union[numpy.ndarray, Tracer]`: Quantized values.
______________________________________________________________________
-
+
### method `update_quantized_values`
```python
-update_quantized_values(qvalues: 'ndarray') → ndarray
+update_quantized_values(
+ qvalues: 'Union[ndarray, Tracer]'
+) → Union[ndarray, Tracer]
```
Update qvalues to get their corresponding values using the related quantized parameters.
**Args:**
-- `qvalues` (numpy.ndarray): Values to replace self.qvalues
+- `qvalues` (Union\[numpy.ndarray, Tracer\]): Values to replace self.qvalues
**Returns:**
-- `values` (numpy.ndarray): Corresponding values
+- `values` (Union\[numpy.ndarray, Tracer\]): Corresponding values
______________________________________________________________________
-
+
### method `update_values`
```python
-update_values(values: 'ndarray') → ndarray
+update_values(values: 'Union[ndarray, Tracer]') → Union[ndarray, Tracer]
```
Update values to get their corresponding qvalues using the related quantized parameters.
**Args:**
-- `values` (numpy.ndarray): Values to replace self.values
+- `values` (Union\[numpy.ndarray, Tracer\]): Values to replace self.values
**Returns:**
-- `qvalues` (numpy.ndarray): Corresponding qvalues
+- `qvalues` (Union\[numpy.ndarray, Tracer\]): Corresponding qvalues
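A minimal sketch of the quant/de-quant round trip on a `QuantizedArray`; `Tracer` inputs (used while building an FHE circuit) go through the same code paths, which is why the signatures above now accept `Union[ndarray, Tracer]`:

```python
import numpy

from concrete.ml.quantization.quantizers import QuantizedArray

values = numpy.array([-1.0, 0.0, 0.5, 1.0])

# Quantize the float values over 4 bits
q_array = QuantizedArray(4, values)

print(q_array.qvalues)    # integer representation
print(q_array.dequant())  # floats close to the originals, up to quantization error

# Replace the stored float values; the matching qvalues are recomputed
new_qvalues = q_array.update_values(numpy.array([0.25, 0.75, -0.5, 0.0]))
```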
diff --git a/docs/developer-guide/api/concrete.ml.sklearn.linear_model.md b/docs/developer-guide/api/concrete.ml.sklearn.linear_model.md
index 68fa38e31..6ead761ff 100644
--- a/docs/developer-guide/api/concrete.ml.sklearn.linear_model.md
+++ b/docs/developer-guide/api/concrete.ml.sklearn.linear_model.md
@@ -223,7 +223,7 @@ Using this attribute is deprecated.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -233,7 +233,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### method `fit`
@@ -284,7 +284,7 @@ get_sklearn_params(deep: bool = True) → dict
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -294,7 +294,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
### method `partial_fit`
@@ -328,7 +328,7 @@ post_processing(y_preds: ndarray) → ndarray
______________________________________________________________________
-
+
### method `predict_proba`
@@ -368,7 +368,7 @@ The justification for the formula in the loss="modified_huber" case is in the ap
______________________________________________________________________
-
+
## class `SGDRegressor`
@@ -382,7 +382,7 @@ An FHE linear regression model fitted with stochastic gradient descent.
For more details on SGDRegressor please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
-
+
### method `__init__`
@@ -457,7 +457,7 @@ Is None if the model is not fitted.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -467,7 +467,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -477,7 +477,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
## class `ElasticNet`
@@ -491,7 +491,7 @@ An ElasticNet regression model with FHE.
For more details on ElasticNet please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html
-
+
### method `__init__`
@@ -559,7 +559,7 @@ Is None if the model is not fitted.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -569,7 +569,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -579,7 +579,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
## class `Lasso`
@@ -593,7 +593,7 @@ A Lasso regression model with FHE.
For more details on Lasso please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html
-
+
### method `__init__`
@@ -660,7 +660,7 @@ Is None if the model is not fitted.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -670,7 +670,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -680,7 +680,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
## class `Ridge`
@@ -694,7 +694,7 @@ A Ridge regression model with FHE.
For more details on Ridge please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html
-
+
### method `__init__`
@@ -759,7 +759,7 @@ Is None if the model is not fitted.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -769,7 +769,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -779,7 +779,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
## class `LogisticRegression`
@@ -793,7 +793,7 @@ A logistic regression model with FHE.
For more details on LogisticRegression please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
-
+
### method `__init__`
@@ -888,7 +888,7 @@ Using this attribute is deprecated.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -898,7 +898,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### classmethod `load_dict`
diff --git a/docs/developer-guide/api/concrete.ml.sklearn.qnn.md b/docs/developer-guide/api/concrete.ml.sklearn.qnn.md
index 312790534..74de5a9c9 100644
--- a/docs/developer-guide/api/concrete.ml.sklearn.qnn.md
+++ b/docs/developer-guide/api/concrete.ml.sklearn.qnn.md
@@ -120,7 +120,7 @@ Get the output quantizers.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -130,7 +130,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### method `fit`
@@ -145,7 +145,7 @@ fit(
______________________________________________________________________
-
+
### method `fit_benchmark`
@@ -160,7 +160,7 @@ fit_benchmark(
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -170,7 +170,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
### method `predict`
@@ -183,7 +183,7 @@ predict(
______________________________________________________________________
-
+
### method `predict_proba`
@@ -196,7 +196,7 @@ predict_proba(
______________________________________________________________________
-
+
## class `NeuralNetClassifier`
@@ -206,7 +206,7 @@ This class wraps a quantized neural network implemented using Torch tools as a s
Inputs that are float64 will be casted to float32 before training as Torch does not handle float64 types properly. Thus should not have a significant impact on the model's performances. If the targets are integers of lower bit-width, they will be safely casted to int64. Else, an error is raised.
-
+
### method `__init__`
@@ -331,7 +331,7 @@ Using this attribute is deprecated.
______________________________________________________________________
-
+
### method `dump_dict`
@@ -341,7 +341,7 @@ dump_dict() → Dict[str, Any]
______________________________________________________________________
-
+
### method `fit`
@@ -356,7 +356,7 @@ fit(
______________________________________________________________________
-
+
### method `fit_benchmark`
@@ -371,7 +371,7 @@ fit_benchmark(
______________________________________________________________________
-
+
### classmethod `load_dict`
@@ -381,7 +381,7 @@ load_dict(metadata: Dict)
______________________________________________________________________
-
+
### method `predict`
@@ -394,7 +394,7 @@ predict(
______________________________________________________________________
-
+
### method `predict_proba`
diff --git a/docs/developer-guide/api/concrete.ml.torch.compile.md b/docs/developer-guide/api/concrete.ml.torch.compile.md
index 92875e204..64883b1a5 100644
--- a/docs/developer-guide/api/concrete.ml.torch.compile.md
+++ b/docs/developer-guide/api/concrete.ml.torch.compile.md
@@ -91,7 +91,7 @@ Take a model in torch or ONNX, turn it to numpy, quantize its inputs / weights /
______________________________________________________________________
-
+
## function `compile_torch_model`
@@ -125,7 +125,9 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs
- `configuration` (Configuration): Configuration object to use during compilation
- `artifacts` (DebugArtifacts): Artifacts object to fill during compilation
- `show_mlir` (bool): if set, the MLIR produced by the converter and which is going to be sent to the compiler backend is shown on the screen, e.g., for debugging or demo
-- `n_bits`: the number of bits for the quantization
+- `n_bits` (Union\[int, Dict\[str, int\]\]): number of bits for quantization; it can be a single value or a dictionary with the following keys (see the sketch after this list):
+ \- "op_inputs" and "op_weights" (mandatory)
+ \- "model_inputs" and "model_outputs" (optional, defaulting to 5 bits). When using a single integer for n_bits, its value is assigned to "op_inputs" and "op_weights". Default is 8 bits.
- `rounding_threshold_bits` (int): if not None, every accumulators in the model are rounded down to the given bits of precision
- `p_error` (Optional\[float\]): probability of error of a single PBS
- `global_p_error` (Optional\[float\]): probability of error of the full circuit. In FHE simulation `global_p_error` is set to 0
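A minimal sketch of the two `n_bits` formats described above, on a toy model:

```python
import numpy
import torch

from concrete.ml.torch.compile import compile_torch_model

model = torch.nn.Sequential(torch.nn.Linear(8, 2))
inputset = numpy.random.uniform(size=(100, 8)).astype(numpy.float32)

# Single value: assigned to both "op_inputs" and "op_weights"
quantized_module = compile_torch_model(model, inputset, n_bits=6)

# Dictionary form: "op_inputs"/"op_weights" are mandatory,
# "model_inputs"/"model_outputs" are optional (5 bits by default)
quantized_module = compile_torch_model(
    model,
    inputset,
    n_bits={
        "op_inputs": 6,
        "op_weights": 6,
        "model_inputs": 8,
        "model_outputs": 8,
    },
)
```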
@@ -139,7 +141,7 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs
______________________________________________________________________
-
+
## function `compile_onnx_model`
@@ -173,7 +175,9 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs
- `configuration` (Configuration): Configuration object to use during compilation
- `artifacts` (DebugArtifacts): Artifacts object to fill during compilation
- `show_mlir` (bool): if set, the MLIR produced by the converter and which is going to be sent to the compiler backend is shown on the screen, e.g., for debugging or demo
-- `n_bits`: the number of bits for the quantization
+- `n_bits` (Union\[int, Dict\[str, int\]\]): number of bits for quantization; it can be a single value or a dictionary with the following keys (see the sketch after this list):
+ \- "op_inputs" and "op_weights" (mandatory)
+ \- "model_inputs" and "model_outputs" (optional, defaulting to 5 bits). When using a single integer for n_bits, its value is assigned to "op_inputs" and "op_weights". Default is 8 bits.
- `rounding_threshold_bits` (int): if not None, every accumulators in the model are rounded down to the given bits of precision
- `p_error` (Optional\[float\]): probability of error of a single PBS
- `global_p_error` (Optional\[float\]): probability of error of the full circuit. In FHE simulation `global_p_error` is set to 0
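The same `n_bits` formats apply here; the only difference is that an `onnx.ModelProto` is passed instead of a torch module. A minimal sketch, assuming a hypothetical "model.onnx" exported beforehand:

```python
import numpy
import onnx

from concrete.ml.torch.compile import compile_onnx_model

onnx_model = onnx.load("model.onnx")  # hypothetical file path
inputset = numpy.random.uniform(size=(100, 8)).astype(numpy.float32)

quantized_module = compile_onnx_model(
    onnx_model,
    inputset,
    n_bits={"op_inputs": 6, "op_weights": 6},
)
```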
@@ -187,7 +191,7 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs
______________________________________________________________________
-
+
## function `compile_brevitas_qat_model`
diff --git a/pyproject.toml b/pyproject.toml
index 770db77f4..57111c87e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "concrete-ml"
-version = "1.4.0"
+version = "1.4.1"
description = "Concrete ML is an open-source set of tools which aims to simplify the use of fully homomorphic encryption (FHE) for data scientists."
license = "BSD-3-Clause-Clear"
authors = [
diff --git a/src/concrete/ml/version.py b/src/concrete/ml/version.py
index d598872a5..3fc8d961b 100644
--- a/src/concrete/ml/version.py
+++ b/src/concrete/ml/version.py
@@ -1,3 +1,3 @@
"""File to manage the version of the package."""
# Auto-generated by "make set_version" do not modify
-__version__ = "1.4.0"
+__version__ = "1.4.1"