class Reshape():
    def __init__(self, layer):
        return

    def backward(self, layer, transcript, config):
        reshape_layer = {
            'layer_type': 'Reshape',
            'params': [],
            'inp_idxes': [config.gradient_tensor_idx(layer['out_idxes'][0])],
            'out_idxes': [config.new_gradient_tensor(layer['inp_idxes'][0])],
            'inp_shapes': [layer['out_shapes'][0]],
            'out_shapes': [layer['inp_shapes'][0]],
            'mask': [],
        }
        transcript.append(reshape_layer)


def produce_graph():
    with open("examples/v2_1.0_224_truncated/model.msgpack", "rb") as data_file:
        byte_data = data_file.read()
    model = msgpack.unpackb(byte_data)

    softmax_output_index = int(np.max(
        [[out for out in layer['out_idxes']] for layer in model['layers']] +
        [[inp for inp in layer['inp_idxes']] for layer in model['layers']]
    )[0])
    circuit_config = CircuitConfig(softmax_output_index + 1)
    circuit_config.new_label_tensor()

    transcript = []
    for layer in reversed(model['layers']):
        fetched_layer = None
        match layer['layer_type']:
            case "Conv2D":
                fetched_layer = Conv2D(layer)
            case "AveragePool2D":
                fetched_layer = AveragePool2D(layer)
            case "Softmax":
                fetched_layer = Softmax(layer)
            case _:
                fetched_layer = Reshape(layer)
        print(layer['layer_type'])
        fetched_layer.backward(layer, transcript, circuit_config)
        print('----------------')

    model['layers'] += transcript
    model['inp_idxes'].append(circuit_config.label_tensor_idx)
    model['out_idxes'] = [31]

    packed = msgpack.packb(model, use_bin_type=True)
    with open("./examples/train_graph/train.msgpack", 'wb') as f:
        f.write(packed)
    print(model.keys())
    return model


model = produce_graph()
print(model.keys())
model['tensors'] = ""
print(model['inp_idxes'], model['out_idxes'])
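A quick hedged check of the graph produced above (assuming produce_graph() has just run and written ./examples/train_graph/train.msgpack): unpack the file again and confirm that the backward transcript was appended to the layers and that the label tensor index was added to the inputs.

import msgpack

# Reload the training graph written by produce_graph() above (path assumed unchanged).
with open('./examples/train_graph/train.msgpack', 'rb') as f:
    train_graph = msgpack.unpackb(f.read())

# The backward transcript is appended to 'layers'; the label tensor index is appended to 'inp_idxes'.
print(len(train_graph['layers']), train_graph['inp_idxes'], train_graph['out_idxes'])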
import tensorflow as tf
import numpy as np
import msgpack
from tensorflow import keras

mnist = tf.keras.datasets.mnist
(images_train, labels_train), (images_test, labels_test) = mnist.load_data()

x = images_test[0]
y = labels_test[0]
print(y)

x = x.flatten() / 255.
x = x.astype(np.float32)
print(x.dtype, x.shape)
np.save('5.npy', x)
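As a hedged sanity check (assuming the script above has just written 5.npy), the saved digit can be reloaded and verified to be the flat float32 vector in [0, 1] that the input converter further below expects.

import numpy as np

# Reload the flattened MNIST test digit saved above.
x = np.load('5.npy')
assert x.dtype == np.float32
assert x.shape == (784,)                   # 28 * 28 pixels, flattened
assert x.min() >= 0.0 and x.max() <= 1.0   # pixels scaled by 1/255
print(x.shape, float(x.min()), float(x.max()))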
import argparse
import ast
from typing import Literal, Union
import tensorflow as tf
import numpy as np
import tflite
import msgpack def get_shape(interpreter: tf.lite.Interpreter, tensor_idx): if tensor_idx == -1: return [] tensor = interpreter.get_tensor(tensor_idx) return list(tensor.shape) def handle_numpy_or_literal(inp: Union[np.ndarray, Literal[0]]): if isinstance(inp, int): return np.array([inp]) return inp def get_inputs(op: tflite.Operator): idxes = handle_numpy_or_literal(op.InputsAsNumpy()) idxes = idxes.tolist() idxes = list(filter(lambda x: x != -1, idxes)) return idxes class Converter: def __init__( self, model_path, scale_factor, k, num_cols, num_randoms, use_selectors, commit, expose_output ): self.model_path = model_path self.scale_factor = scale_factor self.k = k self.num_cols = num_cols self.num_randoms = num_randoms self.use_selectors = use_selectors self.commit = commit self.expose_output = expose_output self.interpreter = tf.lite.Interpreter( model_path=self.model_path, experimental_preserve_all_tensors=True ) self.interpreter.allocate_tensors() with open(self.model_path, 'rb') as f: buf = f.read() self.model = tflite.Model.GetRootAsModel(buf, 0) self.graph = self.model.Subgraphs(0) def valid_activations(self): return [ tflite.ActivationFunctionType.NONE, tflite.ActivationFunctionType.RELU, tflite.ActivationFunctionType.RELU6, ] def _convert_add(self, op: tflite.Operator, generated_tensors: set): op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('Add options is None') opt = tflite.AddOptions() opt.Init(op_opt.Bytes, op_opt.Pos) params = [opt.FusedActivationFunction()] inputs = get_inputs(op) print(generated_tensors) print('Add inputs: ', inputs) if len(inputs) != 2: raise RuntimeError('Add must have 2 inputs') print(inputs[0] in generated_tensors, inputs[1] in generated_tensors) if (inputs[0] in generated_tensors) and (inputs[1] in generated_tensors): retu
rn ('Add', params) nb_generated = (inputs[0] in generated_tensors) + (inputs[1] in generated_tensors) if nb_generated != 1: raise RuntimeError('Add must have 1 generated tensor') const_tensor = self.interpreter.get_tensor(inputs[0]) if inputs[0] not in generated_tensors else self.interpreter.get_tensor(inputs[1]) if np.any(const_tensor == -np.inf): if not np.all(np.logical_or(np.isneginf(const_tensor), const_tensor == 0)): raise RuntimeError('Add constant tensor must be -inf and 0 only') mask = (const_tensor == -np.inf).astype(np.int64) params = [len(mask.shape)] + list(mask.shape) params += mask.flatten().tolist() return ('MaskNegInf', params) else: return ('Add', params) def to_dict(self, start_layer, end_layer): interpreter = self.interpreter model = self.model graph = self.graph if graph is None: raise RuntimeError('Graph is None') input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() for inp_detail in input_details: inp = np.zeros(inp_detail['shape'], dtype=inp_detail['dtype']) interpreter.set_tensor(inp_detail['index'], inp) interpreter.invoke() generated_tensor_idxes = set() for inp in input_details: generated_tensor_idxes.add(inp['index']) layers = [] keep_tensors = set() adjusted_tensors = {} for op_idx in range(graph.OperatorsLength()): op = graph.Operators(op_idx) if op is None: raise RuntimeError('Operator is None') model_opcode = model.OperatorCodes(op.OpcodeIndex()) if model_opcode is None: raise RuntimeError('Operator code is None') op_code = model_opcode.BuiltinCode() for output in handle_numpy_or_literal(op.OutputsAsNumpy()): generated_tensor_idxes.add(output) if op_idx < start_layer: continue if op_idx > end_layer: break for input in handle_numpy_or_literal(op
.InputsAsNumpy()): keep_tensors.add(input) if op_code == tflite.BuiltinOperator.AVERAGE_POOL_2D: layer_type = 'AveragePool2D' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('AvgPool2D options is None') opt = tflite.Pool2DOptions() opt.Init(op_opt.Bytes, op_opt.Pos) params = [opt.FilterHeight(), opt.FilterWidth(), opt.StrideH(), opt.StrideW()] elif op_code == tflite.BuiltinOperator.MAX_POOL_2D: layer_type = 'MaxPool2D' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('MaxPool2D options is None') opt = tflite.Pool2DOptions() opt.Init(op_opt.Bytes, op_opt.Pos) if opt.Padding() == tflite.Padding.SAME: raise NotImplementedError('SAME padding is not supported') if opt.FusedActivationFunction() != tflite.ActivationFunctionType.NONE: raise NotImplementedError('Fused activation is not supported') params = [opt.FilterHeight(), opt.FilterWidth(), opt.StrideH(), opt.StrideW()] elif op_code == tflite.BuiltinOperator.CUSTOM: layer_type = 'Conv2D' activation = 0 weights = self.interpreter.get_tensor(op.Inputs(1)) weights = np.transpose(weights, (3, 0, 1, 2)) weights = (weights * self.scale_factor).round().astype(np.int64) adjusted_tensors[op.Inputs(1)] = weights params = [0, 1, activation, 1, 1] elif op_code == tflite.BuiltinOperator.CONV_2D: layer_type = 'Conv2D' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('Conv2D options is None') opt = tflite.Conv2DOptions() opt.Init(op_opt.Bytes, op_opt.Pos) if opt.DilationHFactor() != 1 or opt.DilationWFactor() != 1: raise NotImplementedError('Dilation is not supported') if opt.FusedActivationFunction() not in self.valid_activations(): raise NotImplementedError('Unsupported activation fun
ction at layer {op_idx}') params = \ [0] + \ [opt.Padding()] + \ [opt.FusedActivationFunction()] + \ [opt.StrideH(), opt.StrideW()] elif op_code == tflite.BuiltinOperator.DEPTHWISE_CONV_2D: layer_type = 'Conv2D' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('DepthwiseConv2D options is None') opt = tflite.DepthwiseConv2DOptions() opt.Init(op_opt.Bytes, op_opt.Pos) if opt.DilationHFactor() != 1 or opt.DilationWFactor() != 1: raise NotImplementedError('Dilation is not supported') if opt.FusedActivationFunction() not in self.valid_activations(): raise NotImplementedError('Unsupported activation function at layer {op_idx}') params = \ [1] + \ [opt.Padding()] + \ [opt.FusedActivationFunction()] + \ [opt.StrideH(), opt.StrideW()] elif op_code == tflite.BuiltinOperator.FULLY_CONNECTED: layer_type = 'FullyConnected' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('Fully connected options is None') opt = tflite.FullyConnectedOptions() opt.Init(op_opt.Bytes, op_opt.Pos) if opt.FusedActivationFunction() not in self.valid_activations(): raise NotImplementedError(f'Unsupported activation function at layer {op_idx}') params = [opt.FusedActivationFunction()] elif op_code == tflite.BuiltinOperator.BATCH_MATMUL: layer_type = 'BatchMatMul' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('BatchMatMul options is None') opt = tflite.BatchMatMulOptions() opt.Init(op_opt.Bytes, op_opt.Pos) if opt.AdjX() is True: raise NotImplementedError('AdjX is not supported') params = [int(opt.AdjX()), int(opt.AdjY())] elif op_code == tflite.BuiltinOperator.ADD: layer_type, params = self._convert_
add(op, generated_tensor_idxes) elif op_code == tflite.BuiltinOperator.MUL: layer_type = 'Mul' params = [] elif op_code == tflite.BuiltinOperator.SUB: sub_val = interpreter.get_tensor(op.Inputs(1)) if np.any(np.isin(sub_val, 10000)): layer_type = 'MaskNegInf' mask = (sub_val == 10000).astype(np.int64) params = [len(mask.shape)] + list(mask.shape) params += mask.flatten().tolist() else: layer_type = 'Sub' params = [] elif op_code == tflite.BuiltinOperator.DIV: layer_type = 'Mul' div_val = interpreter.get_tensor(op.Inputs(1)) if type(div_val) != np.float32: raise NotImplementedError('Only support one divisor') adjusted_tensors[op.Inputs(1)] = np.array([(self.scale_factor / div_val).round().astype(np.int64)]) params = [] elif op_code == tflite.BuiltinOperator.PAD: layer_type = 'Pad' tensor_idx = op.Inputs(1) tensor = interpreter.get_tensor(tensor_idx).flatten().astype(np.int64) params = tensor.tolist() elif op_code == tflite.BuiltinOperator.SOFTMAX: layer_type = 'Softmax' if layers[-1]['layer_type'] == 'MaskNegInf': params = layers[-1]['params'] elif layers[-2]['layer_type'] == 'MaskNegInf': params = layers[-2]['params'] params = [params[0] - 1] + params[2:] else: params = [] elif op_code == tflite.BuiltinOperator.MEAN: layer_type = 'Mean' inp_shape = interpreter.get_tensor(op.Inputs(0)).shape mean_idxes = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64) if len(mean_idxes) + 2 != len(inp_shape): raise NotImplementedError(f'Only mean over all but one axis is supported: {op_idx}') params = mean_idxes.tolist() elif op_code == tflite.BuiltinOperator.SQUARE: layer_type = 'Square' para
ms = [] elif op_code == tflite.BuiltinOperator.SQUARED_DIFFERENCE: layer_type = 'SquaredDifference' params = [] elif op_code == tflite.BuiltinOperator.RSQRT: layer_type = 'Rsqrt' params = [] elif op_code == tflite.BuiltinOperator.LOGISTIC: layer_type = 'Logistic' params = [] elif op_code == tflite.BuiltinOperator.TANH: layer_type = 'Tanh' params = [] elif op_code == tflite.BuiltinOperator.POW: layer_type = 'Pow' power = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.float32) if power != 3.: raise NotImplementedError(f'Only support power 3') power = power.round().astype(np.int64) if len(power) != 1: raise NotImplementedError(f'Only scalar power is supported: {op_idx}') params = power.tolist() elif op_code == tflite.BuiltinOperator.SHAPE: layer_type = 'Noop' params = [0] elif op_code == tflite.BuiltinOperator.GATHER: layer_type = 'Noop' params = [0] elif op_code == tflite.BuiltinOperator.REDUCE_PROD: layer_type = 'Noop' params = [0] elif op_code == tflite.BuiltinOperator.STRIDED_SLICE: layer_type = 'Noop' params = [0] elif op_code == tflite.BuiltinOperator.BROADCAST_ARGS: layer_type = 'Noop' params = [0] elif op_code == tflite.BuiltinOperator.BROADCAST_TO: layer_type = 'Noop' params = [0] elif op_code == tflite.BuiltinOperator.RESHAPE: layer_type = 'Reshape' params = [] elif op_code == tflite.BuiltinOperator.TRANSPOSE: layer_type = 'Transpose' params = get_shape(interpreter, op.Inputs(0)) + interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64).tolist() elif op_code == tflite.BuiltinOperator.CONCATENATION: layer_type = 'Concatenation' op_opt = op.BuiltinOptions() if op_opt is None:
raise RuntimeError('Concatenation options is None') opt = tflite.ConcatenationOptions() opt.Init(op_opt.Bytes, op_opt.Pos) params = [opt.Axis()] elif op_code == tflite.BuiltinOperator.PACK: layer_type = 'Pack' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('Pack options is None') opt = tflite.PackOptions() opt.Init(op_opt.Bytes, op_opt.Pos) params = [opt.Axis()] if params[0] > 1: raise NotImplementedError(f'Only axis=0,1 supported at layer {op_idx}') elif op_code == tflite.BuiltinOperator.SPLIT: layer_type = 'Split' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('Split options is None') opt = tflite.SplitOptions() opt.Init(op_opt.Bytes, op_opt.Pos) axis = interpreter.get_tensor(op.Inputs(0)).flatten().astype(np.int64)[0] num_splits = opt.NumSplits() inp = interpreter.get_tensor(op.Inputs(1)) if inp.shape[axis] % num_splits != 0: raise NotImplementedError(f'Only equal splits supported at layer {op_idx}') params = [int(axis), num_splits] elif op_code == tflite.BuiltinOperator.SLICE: layer_type = 'Slice' begin = interpreter.get_tensor(op.Inputs(1)).flatten().astype(np.int64).tolist() size = interpreter.get_tensor(op.Inputs(2)).flatten().astype(np.int64).tolist() params = begin + size elif op_code == tflite.BuiltinOperator.RESIZE_NEAREST_NEIGHBOR: layer_type = 'ResizeNearestNeighbor' op_opt = op.BuiltinOptions() if op_opt is None: raise RuntimeError('ResizeNearestNeighbor options is None') opt = tflite.ResizeNearestNeighborOptions() opt.Init(op_opt.Bytes, op_opt.Pos) if opt.AlignCorners(): raise NotImplementedError(f'Align corners not supported at layer {op_idx}') if not opt.HalfPixelCenters(): raise NotImplementedError(f'Half pixel centers not
supported at layer {op_idx}') params = [int(opt.AlignCorners()), int(opt.HalfPixelCenters())] else: op_name = None for attr in dir(tflite.BuiltinOperator): if not attr.startswith('__'): if getattr(tflite.BuiltinOperator, attr) == op_code: op_name = attr raise NotImplementedError('Unsupported operator at layer {}: {}, {}'.format(op_idx, op_code, op_name)) inp_idxes = get_inputs(op) rsqrt_overflows = [99, 158, 194, 253, 289, 348] if op_idx in rsqrt_overflows: if op_code == tflite.BuiltinOperator.RSQRT: mask = [0, 1] else: mask = [] else: mask = [] layers.append({ 'layer_type': layer_type, 'inp_idxes': inp_idxes, 'inp_shapes': [get_shape(interpreter, inp_idx) for inp_idx in inp_idxes], 'out_idxes': [op.Outputs(i) for i in range(op.OutputsLength())], 'out_shapes': [get_shape(interpreter, op.Outputs(i)) for i in range(op.OutputsLength())], 'params': params, 'mask': mask, }) print(layers) print() print('keep tensors:', keep_tensors) tensors = [] for tensor_idx in range(graph.TensorsLength()): if tensor_idx not in keep_tensors: continue tensor = graph.Tensors(tensor_idx) if tensor is None: raise NotImplementedError('Tensor is None') if tensor_idx in generated_tensor_idxes: print(f'skipping generated tensor: {format(tensor_idx)}, {tensor.Name()}') continue shape = [] for i in range(tensor.ShapeLength()): shape.append(int(tensor.Shape(i))) if shape == []: shape = [1] tensor_data = interpreter.get_tensor(tensor_idx) if tensor.Type() == tflite.TensorType.FLOAT32: tensor_data = (tensor_data * self.scale_factor).round().astype(np.int64) elif tensor.Type() == tflite.TensorType.INT32: tensor_data = tensor_data.astype(np.int64) elif tensor
.Type() == tflite.TensorType.INT64: continue else: raise NotImplementedError('Unsupported tensor type: {}'.format(tensor.Type())) if tensor_idx in adjusted_tensors: tensor_data = adjusted_tensors[tensor_idx] shape = tensor_data.shape tensors.append({ 'idx': tensor_idx, 'shape': shape, 'data': tensor_data.flatten().tolist(), }) commit_before = [] commit_after = [] if self.commit: input_tensors = [inp['index'] for inp in input_details] weight_tensors = [tensor['idx'] for tensor in tensors if tensor['idx'] not in input_tensors] commit_before = [weight_tensors, input_tensors] output_tensors = [out['index'] for out in output_details] commit_after = [output_tensors] out_idxes = layers[-1]['out_idxes'] if self.expose_output else [] d = { 'global_sf': self.scale_factor, 'k': self.k, 'num_cols': self.num_cols, 'num_random': self.num_randoms, 'inp_idxes': [inp['index'] for inp in input_details], 'out_idxes': out_idxes, 'layers': layers, 'tensors': tensors, 'use_selectors': self.use_selectors, 'commit_before': commit_before, 'commit_after': commit_after, } print() print(d['layers'][-1]) print(d.keys()) print(d['out_idxes']) return d def to_msgpack(self, start_layer, end_layer, use_selectors=True): d = self.to_dict(start_layer, end_layer) model_packed = msgpack.packb(d, use_bin_type=True) d['tensors'] = [] config_packed = msgpack.packb(d, use_bin_type=True) return model_packed, config_packed def main(): parser = argparse.ArgumentParser() parser.add_argument('--model', type=str, required=True) parser.add_argument('--model_output', type=str, required=True) parser.add_argument('--config_output', type=str, required=True) parser.add_argument('--scale_factor', type=int, default=2**16) parser.add_argument('--k', type=int, default=19) parser.add_ar
gument('--eta', type=float, default=0.001) parser.add_argument('--num_cols', type=int, default=6) parser.add_argument('--use_selectors', action=argparse.BooleanOptionalAction, required=False, default=True) parser.add_argument('--commit', action=argparse.BooleanOptionalAction, required=False, default=False) parser.add_argument('--expose_output', action=argparse.BooleanOptionalAction, required=False, default=True) parser.add_argument('--start_layer', type=int, default=0) parser.add_argument('--end_layer', type=int, default=10000) parser.add_argument('--num_randoms', type=int, default=20001) args = parser.parse_args() converter = Converter( args.model, args.scale_factor, args.k, args.num_cols, args.num_randoms, args.use_selectors, args.commit, args.expose_output, ) model_packed, config_packed = converter.to_msgpack( start_layer=args.start_layer, end_layer=args.end_layer, ) if model_packed is None: raise Exception('Failed to convert model') with open(args.model_output, 'wb') as f: f.write(model_packed) with open(args.config_output, 'wb') as f: f.write(config_packed) if __name__ == '__main__': main()
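A minimal sketch of how the converter above might be invoked from the command line; the script path and file paths are placeholders, and the flags mirror the argparse definition in main().

import subprocess

# Hypothetical invocation; 'python/converter.py' and the file paths are placeholders.
subprocess.run([
    'python3', 'python/converter.py',
    '--model', 'model.tflite',            # TFLite model to convert
    '--model_output', 'model.msgpack',    # packed model dict including tensors
    '--config_output', 'config.msgpack',  # same dict with the tensors stripped
    '--scale_factor', str(2 ** 16),       # default fixed-point scale factor
    '--k', '19',
], check=True)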
import argparse
import ast
import numpy as np
import msgpack


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_config', type=str, required=True)
    parser.add_argument('--inputs', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    args = parser.parse_args()

    inputs = args.inputs.split(',')
    with open(args.model_config, 'rb') as f:
        model_config = msgpack.unpackb(f.read())
    input_idxes = model_config['inp_idxes']
    scale_factor = model_config['global_sf']

    # Get the input shapes from the layers
    input_shapes = [[0] for _ in input_idxes]
    for layer in model_config['layers']:
        for layer_inp_idx, layer_shape in zip(layer['inp_idxes'], layer['inp_shapes']):
            for index, inp_idx in enumerate(input_idxes):
                if layer_inp_idx == inp_idx:
                    input_shapes[index] = layer_shape

    tensors = []
    for inp, shape, idx in zip(inputs, input_shapes, input_idxes):
        tensor = np.load(inp).reshape(shape)
        tensor = (tensor * scale_factor).round().astype(np.int64)
        tensors.append({
            'idx': idx,
            'shape': shape,
            'data': tensor.flatten().tolist(),
        })

    packed = msgpack.packb(tensors, use_bin_type=True)
    with open(args.output, 'wb') as f:
        f.write(packed)


if __name__ == '__main__':
    main()
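The only numeric transformation this input converter applies is fixed-point quantization by the model's global scale factor; a minimal sketch, assuming the converter's default scale factor of 2**16.

import numpy as np

scale_factor = 1 << 16                        # model_config['global_sf'] in the script above
tensor = np.array([0.5, -0.25, 1.0], dtype=np.float32)
quantized = (tensor * scale_factor).round().astype(np.int64)
print(quantized.tolist())                     # [32768, -16384, 65536]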
# A converter for training data
# Performs the conversion npy -> msgpack
# TODO: Ensure that training works with models that take in multiple input shapes
#
# Shortcut:
# `python3 python/training_converter.py --input_shapes 7,7,320 --input_idxes 1,0 --output training_data/inputs.msgpack --labels_output training_data/labels.msgpack`
#
import argparse
import ast
import numpy as np
import msgpack
import os

NUM_LOADS = 1
SF = 1 << 17


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_shapes', type=str, required=True)
    parser.add_argument('--output', type=str, required=True)
    TRAINING_DIRECTORY = './testing/data/pre_last_conv/flowers/train'
    args = parser.parse_args()
    input_shapes = ast.literal_eval(args.input_shapes)

    loaded = 0
    tensors = []
    num_classes = os.listdir(TRAINING_DIRECTORY)
    first_file = "0.npy"
    for file_name in os.listdir(TRAINING_DIRECTORY):
        if loaded == NUM_LOADS:
            break
        label = int(first_file[:-4])
        data_array = np.load(TRAINING_DIRECTORY + '/' + first_file)
        input_shape = input_shapes
        for idx in range(data_array.shape[0]):
            print(SF)
            print((np.vstack(data_array) * SF).round().astype(np.int64))
            tensors.append({
                'idx': 0,
                'shape': input_shape,
                'data': list(map(lambda x: int(x), list((data_array[idx] * SF).round().astype(np.int64).flatten()))),
            })
            # represent the label as a one hot encoding
            one_hot = np.zeros(102)
            one_hot[label] = SF
            print("IMPORTANT LABEL", label)
            print("IMPORTANT LABEL", data_array[idx].flatten()[:500])
            # print(one_hot.shape())
            tensors.append({
                'idx': 11,
                'shape': (1, 102),
                'data': list(map(lambda x: int(x), one_hot)),
            })
            loaded += 1
            if loaded == NUM_LOADS:
                break

    packed_inputs = msgpack.packb(tensors, use_bin_type=True)
    # print(tensors)
    with open(args.output, 'wb') as f:
        f.write(packed_inputs)


if __name__ == '__main__':
    main()
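For reference, the label encoding used above is a one-hot vector whose hot entry carries the fixed-point scale factor rather than 1; a small sketch with the same SF and 102 classes.

import numpy as np

SF = 1 << 17
label = 5
one_hot = np.zeros(102, dtype=np.int64)
one_hot[label] = SF   # scaled one-hot: the circuit consumes fixed-point values
print(int(one_hot[label]), int(one_hot.sum()))  # 131072 131072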
use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};
use zkml::{
  model::ModelCircuit,
  utils::{
    helpers::get_public_values,
    loader::{load_model_msgpack, ModelMsgpack},
  },
};

fn main() {
  let config_fname = std::env::args().nth(1).expect("config file path");
  let inp_fname = std::env::args().nth(2).expect("input file path");

  let config: ModelMsgpack = load_model_msgpack(&config_fname, &inp_fname);
  let circuit = ModelCircuit::<Fr>::generate_from_file(&config_fname, &inp_fname);

  let _prover = MockProver::run(config.k.try_into().unwrap(), &circuit, vec![vec![]]).unwrap();
  let public_vals = get_public_values();

  let prover = MockProver::run(config.k.try_into().unwrap(), &circuit, vec![public_vals]).unwrap();
  assert_eq!(prover.verify(), Ok(()));
}
use halo2_proofs::halo2curves::{bn256::Fr, pasta::Fp};
use zkml::{
  model::ModelCircuit,
  utils::{proving_ipa::time_circuit_ipa, proving_kzg::time_circuit_kzg},
};

fn main() {
  let config_fname = std::env::args().nth(1).expect("config file path");
  let inp_fname = std::env::args().nth(2).expect("input file path");
  let kzg_or_ipa = std::env::args().nth(3).expect("kzg or ipa");

  if kzg_or_ipa != "kzg" && kzg_or_ipa != "ipa" {
    panic!("Must specify kzg or ipa");
  }

  if kzg_or_ipa == "kzg" {
    let circuit = ModelCircuit::<Fr>::generate_from_file(&config_fname, &inp_fname);
    time_circuit_kzg(circuit);
  } else {
    let circuit = ModelCircuit::<Fp>::generate_from_file(&config_fname, &inp_fname);
    time_circuit_ipa(circuit);
  }
}
use halo2_proofs::halo2curves::bn256::Fr;
use zkml::{
  model::ModelCircuit,
  utils::{loader::load_config_msgpack, proving_kzg::verify_circuit_kzg},
};

fn main() {
  let config_fname = std::env::args().nth(1).expect("config file path");
  let vkey_fname = std::env::args().nth(2).expect("verification key file path");
  let proof_fname = std::env::args().nth(3).expect("proof file path");
  let public_vals_fname = std::env::args().nth(4).expect("public values file path");
  let kzg_or_ipa = std::env::args().nth(5).expect("kzg or ipa");

  if kzg_or_ipa != "kzg" && kzg_or_ipa != "ipa" {
    panic!("Must specify kzg or ipa");
  }

  if kzg_or_ipa == "kzg" {
    let config = load_config_msgpack(&config_fname);
    let circuit = ModelCircuit::<Fr>::generate_from_msgpack(config, false);
    println!("Loaded configuration");
    verify_circuit_kzg(circuit, &vkey_fname, &proof_fname, &public_vals_fname);
  } else {
    // Serialization of the verification key doesn't seem to be supported for IPA
    panic!("Not implemented");
  }
}
use std::fs::File;

use halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr};
use zkml::{
  model::ModelCircuit,
  utils::{
    helpers::get_public_values,
    loader::{load_config_msgpack, ModelMsgpack, TensorMsgpack},
  },
};

fn main() {
  let config_fname = std::env::args().nth(1).expect("config file path");
  let wav_fname = std::env::args().nth(2).expect("wav file path");

  let mut wav_file = File::open(wav_fname).unwrap();
  let (_header, data) = wav::read(&mut wav_file).unwrap();
  let data = match data {
    wav::BitDepth::Sixteen(data) => data,
    _ => panic!("Unsupported bit depth"),
  };
  let data: Vec<i64> = data.iter().map(|x| *x as i64).collect();

  let base_config = load_config_msgpack(&config_fname);
  let config = ModelMsgpack {
    tensors: vec![TensorMsgpack {
      idx: 0,
      shape: vec![1, data.len().try_into().unwrap()],
      data: data,
    }],
    inp_idxes: vec![0],
    out_idxes: vec![],
    layers: vec![],
    commit_before: Some(vec![]),
    commit_after: Some(vec![vec![0]]),
    ..base_config
  };
  println!("Config: {:?}", config);

  let k = config.k;
  let circuit = ModelCircuit::<Fr>::generate_from_msgpack(config, false);

  let _prover = MockProver::run(k.try_into().unwrap(), &circuit, vec![vec![]]).unwrap();
  let public_vals: Vec<Fr> = get_public_values();
  println!("Public values: {:?}", public_vals);
}
pub mod commit;
pub mod packer;
pub mod poseidon_commit;
use std::{collections::HashMap, rc::Rc};

use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};

use crate::{gadgets::gadget::GadgetConfig, layers::layer::CellRc};

pub trait Commit<F: PrimeField> {
  fn commit(
    &self,
    layouter: impl Layouter<F>,
    gadget_config: Rc<GadgetConfig>,
    constants: &HashMap<i64, CellRc<F>>,
    values: &Vec<CellRc<F>>,
    blinding: CellRc<F>,
  ) -> Result<Vec<CellRc<F>>, Error>;
}
use std::{ cmp::{max, min}, collections::{BTreeMap, HashMap}, marker::PhantomData, rc::Rc, }; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression}, poly::Rotation, }; use ndarray::{Array, IxDyn}; use crate::{ gadgets::gadget::{GadgetConfig, GadgetType}, layers::layer::{AssignedTensor, CellRc}, }; const NUM_BITS_PER_FIELD_ELEM: usize = 254; pub struct PackerConfig<F: PrimeField> { pub num_bits_per_elem: usize, pub num_elem_per_packed: usize, pub num_packed_per_row: usize, pub exponents: Vec<F>, _marker: PhantomData<F>, } pub struct PackerChip<F: PrimeField> { pub config: PackerConfig<F>, } impl<F: PrimeField> PackerChip<F> { pub fn get_exponents(num_bits_per_elem: usize, num_exponents: usize) -> Vec<F> { let mul_val = F::from(1 << num_bits_per_elem); let mut exponents = vec![F::ONE]; for _ in 1..num_exponents { exponents.push(exponents[exponents.len() - 1] * mul_val); } exponents } pub fn construct(num_bits_per_elem: usize, gadget_config: &GadgetConfig) -> PackerConfig<F> { let columns = &gadget_config.columns; let num_elem_per_packed = if NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem > columns.len() - 1 { columns.len() - 1 } else { NUM_BITS_PER_FIELD_ELEM / num_bits_per_elem }; println!("column len: {}", columns.len()); println!("num_bits_per_elem: {}", num_bits_per_elem); println!("NUM_BITS_PER_FIELD_ELEM: {}", NUM_BITS_PER_FIELD_ELEM); println!("num_elem_per_packed: {}", num_elem_per_packed); let num_packed_per_row = max( 1, columns.len() / (num_elem_per_packed * (num_bits_per_elem + 1)), ); println!("num_packed_per_row: {}", num_packed_per_row); let exponents = Self::get_exponents(num_bits_per_elem, num_elem_per_packed); let config = PackerConfig { num_bits_per_elem, num_elem_per_packed, num_packed_per_row, exponents, _marker: PhantomData, };
config } pub fn configure( meta: &mut ConstraintSystem<F>, packer_config: PackerConfig<F>, gadget_config: GadgetConfig, ) -> GadgetConfig { let selector = meta.complex_selector(); let columns = gadget_config.columns; let lookup = gadget_config.tables.get(&GadgetType::InputLookup).unwrap()[0]; let exponents = &packer_config.exponents; let num_bits_per_elem = packer_config.num_bits_per_elem; let shift_val = 1 << (num_bits_per_elem - 1); let shift_val = Expression::Constant(F::from(shift_val as u64)); meta.create_gate("packer", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for i in 0..packer_config.num_packed_per_row { let offset = i * (packer_config.num_elem_per_packed + 1); let inps = columns[offset..offset + packer_config.num_elem_per_packed] .iter() .map(|col| meta.query_advice(*col, Rotation::cur())) .collect::<Vec<_>>(); let outp = meta.query_advice( columns[offset + packer_config.num_elem_per_packed], Rotation::cur(), ); let res = inps .into_iter() .zip(exponents.iter()) .map(|(inp, exp)| (inp + shift_val.clone()) * (*exp)) .fold(Expression::Constant(F::ZERO), |acc, prod| acc + prod); constraints.push(s.clone() * (res - outp)); } constraints }); for i in 0..packer_config.num_packed_per_row { let offset = i * (packer_config.num_elem_per_packed + 1); for j in 0..packer_config.num_elem_per_packed { meta.lookup("packer lookup", |meta| { let s = meta.query_selector(selector); let inp = meta.query_advice(columns[offset + j], Rotation::cur()); vec![(s * (inp + shift_val.clone()), lookup)] }); } } let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::Packer, vec![selector]); GadgetConfig { columns, selectors, ..gadget_
config } } pub fn copy_and_pack_row( &self, mut layouter: impl Layouter<F>, gadget_config: Rc<GadgetConfig>, cells: Vec<CellRc<F>>, zero: &AssignedCell<F, F>, ) -> Result<Vec<CellRc<F>>, Error> { let columns = &gadget_config.columns; let selector = gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0]; let num_bits_per_elem = gadget_config.num_bits_per_elem; let shift_val = 1 << (num_bits_per_elem - 1); let shift_val = F::from(shift_val as u64); let outp = layouter.assign_region( || "pack row", |mut region| { if gadget_config.use_selectors { selector.enable(&mut region, 0)?; } let mut packed = vec![]; for i in 0..self.config.num_packed_per_row { let val_offset = i * self.config.num_elem_per_packed; let col_offset = i * (self.config.num_elem_per_packed + 1); let mut vals = cells [val_offset..min(val_offset + self.config.num_elem_per_packed, cells.len())] .iter() .enumerate() .map(|(i, x)| { x.copy_advice(|| "", &mut region, columns[col_offset + i], 0) .unwrap(); x.value().copied() }) .collect::<Vec<_>>(); let zero_copied = (cells.len()..self.config.num_elem_per_packed) .map(|i| { zero .copy_advice(|| "", &mut region, columns[col_offset + i], 0) .unwrap(); zero.value().copied() }) .collect::<Vec<_>>(); vals.extend(zero_copied); let res = vals.iter().zip(self.config.exponents.iter()).fold( Value::known(F::ZERO), |acc, (inp, exp)| { let res = acc + (*inp + Value::known(shift_val)) * Value::known(*exp); res }, ); let outp = region.assign_advice( || "", columns[col_offset + self.config.num_elem_per_packed], 0,
|| res, )?; packed.push(Rc::new(outp)); } Ok(packed) }, )?; Ok(outp) } pub fn assign_and_pack_row( &self, mut layouter: impl Layouter<F>, gadget_config: Rc<GadgetConfig>, values: Vec<&F>, zero: &AssignedCell<F, F>, ) -> Result<(Vec<CellRc<F>>, Vec<CellRc<F>>), Error> { let columns = &gadget_config.columns; let selector = gadget_config.selectors.get(&GadgetType::Packer).unwrap()[0]; let num_bits_per_elem = gadget_config.num_bits_per_elem; let shift_val = 1 << (num_bits_per_elem - 1); let shift_val = F::from(shift_val as u64); let outp = layouter.assign_region( || "pack row", |mut region| { if gadget_config.use_selectors { selector.enable(&mut region, 0)?; } let mut packed = vec![]; let mut assigned = vec![]; for i in 0..self.config.num_packed_per_row { let val_offset = i * self.config.num_elem_per_packed; let col_offset = i * (self.config.num_elem_per_packed + 1); let mut values = values [val_offset..min(val_offset + self.config.num_elem_per_packed, values.len())] .iter() .map(|x| **x) .collect::<Vec<_>>(); let vals = values .iter() .enumerate() .map(|(i, x)| { let tmp = region .assign_advice(|| "", columns[col_offset + i], 0, || Value::known(*x)) .unwrap(); Rc::new(tmp) }) .collect::<Vec<_>>(); assigned.extend(vals); let zero_vals = (values.len()..self.config.num_elem_per_packed) .map(|i| { zero .copy_advice(|| "", &mut region, columns[col_offset + i], 0) .unwrap(); F::ZERO }) .collect::<Vec<_>>(); values.extend(zero_vals); let res = values .iter() .zip(self.c
onfig.exponents.iter()) .fold(F::ZERO, |acc, (inp, exp)| { let res = acc + (*inp + shift_val) * (*exp); res }); let outp = region.assign_advice( || "", columns[col_offset + self.config.num_elem_per_packed], 0, || Value::known(res), )?; packed.push(Rc::new(outp)); } Ok((packed, assigned)) }, )?; Ok(outp) } pub fn assign_and_pack( &self, mut layouter: impl Layouter<F>, gadget_config: Rc<GadgetConfig>, constants: &HashMap<i64, CellRc<F>>, tensors: &BTreeMap<i64, Array<F, IxDyn>>, ) -> Result<(BTreeMap<i64, AssignedTensor<F>>, Vec<CellRc<F>>), Error> { let mut values = vec![]; for (_, tensor) in tensors { for value in tensor.iter() { values.push(value); } } let mut packed = vec![]; let mut assigned = vec![]; let zero = constants.get(&0).unwrap().clone(); let num_elems_per_row = self.config.num_packed_per_row * self.config.num_elem_per_packed; for i in 0..(values.len().div_ceil(num_elems_per_row)) { let row = values[i * num_elems_per_row..min((i + 1) * num_elems_per_row, values.len())].to_vec(); let (row_packed, row_assigned) = self .assign_and_pack_row( layouter.namespace(|| "pack row"), gadget_config.clone(), row, zero.as_ref(), ) .unwrap(); packed.extend(row_packed); assigned.extend(row_assigned); } let mut assigned_tensors = BTreeMap::new(); let mut start_idx = 0; for (tensor_id, tensor) in tensors { let num_el = tensor.len(); let v = assigned[start_idx..start_idx + num_el].to_vec(); let new_tensor = Array::from_shape_vec(tensor.raw_dim(), v).unwrap(); assigned_tensors.insert(*tensor_id, new_tensor); start_idx += num_el; } Ok((assigned_tensors, packed)) } pub fn copy_and_pack( &self, mut layouter: impl La
youter<F>, gadget_config: Rc<GadgetConfig>, constants: &HashMap<i64, CellRc<F>>, tensors: &BTreeMap<i64, AssignedTensor<F>>, ) -> Result<Vec<CellRc<F>>, Error> { let mut values = vec![]; for (_, tensor) in tensors { for value in tensor.iter() { values.push(value.clone()); } } let mut packed = vec![]; let zero = constants.get(&0).unwrap().clone(); let num_elems_per_row = self.config.num_packed_per_row * self.config.num_elem_per_packed; for i in 0..(values.len().div_ceil(num_elems_per_row)) { let row = values[i * num_elems_per_row..min((i + 1) * num_elems_per_row, values.len())].to_vec(); let row_packed = self .copy_and_pack_row( layouter.namespace(|| "pack row"), gadget_config.clone(), row, zero.as_ref(), ) .unwrap(); packed.extend(row_packed); } Ok(packed) } }
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_gadgets::poseidon::{ primitives::{generate_constants, Absorbing, ConstantLength, Domain, Mds, Spec}, PaddedWord, PoseidonSpongeInstructions, Pow5Chip, Pow5Config, Sponge, }; use halo2_proofs::{ circuit::Layouter, halo2curves::ff::{FromUniformBytes, PrimeField}, plonk::{Advice, Column, ConstraintSystem, Error}, }; use crate::{gadgets::gadget::GadgetConfig, layers::layer::CellRc}; use super::commit::Commit; pub const WIDTH: usize = 3; pub const RATE: usize = 2; pub const L: usize = 8 - WIDTH - 1; pub struct PoseidonCommitChip< F: PrimeField + Ord + FromUniformBytes<64>, const WIDTH: usize, const RATE: usize, const L: usize, > { pub poseidon_config: Pow5Config<F, WIDTH, RATE>, } pub struct P128Pow5T3Gen<F: PrimeField, const SECURE_MDS: usize>(PhantomData<F>); impl<F: PrimeField, const SECURE_MDS: usize> P128Pow5T3Gen<F, SECURE_MDS> { pub fn new() -> Self { P128Pow5T3Gen(PhantomData::default()) } } impl<F: FromUniformBytes<64> + Ord, const SECURE_MDS: usize> Spec<F, 3, 2> for P128Pow5T3Gen<F, SECURE_MDS> { fn full_rounds() -> usize { 8 } fn partial_rounds() -> usize { 56 } fn sbox(val: F) -> F { val.pow_vartime([5]) } fn secure_mds() -> usize { SECURE_MDS } fn constants() -> (Vec<[F; 3]>, Mds<F, 3>, Mds<F, 3>) { generate_constants::<_, Self, 3, 2>() } } pub struct MyHash< F: PrimeField, PoseidonChip: PoseidonSpongeInstructions<F, S, D, T, RATE>, S: Spec<F, T, RATE>, D: Domain<F, RATE>, const T: usize, const RATE: usize, > { pub sponge: Sponge<F, PoseidonChip, S, Absorbing<PaddedWord<F>, RATE>, D, T, RATE>, } impl<F: PrimeField + Ord + FromUniformBytes<64>> PoseidonCommitChip<F, WIDTH, RATE, L> { pub fn configure( meta: &mut ConstraintSystem<F>, _input: [Column<Advice>; L], state: [Column<Advice>; WIDTH], partial_sbox: Column<Advice>, ) -> PoseidonCommitChip<F, WIDTH, RATE, L> { let rc_a = (0..WIDTH).map(|_| meta.fixe
d_column()).collect::<Vec<_>>(); let rc_b = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>(); meta.enable_constant(rc_b[0]); PoseidonCommitChip { poseidon_config: Pow5Chip::configure::<P128Pow5T3Gen<F, 0>>( meta, state.try_into().unwrap(), partial_sbox, rc_a.try_into().unwrap(), rc_b.try_into().unwrap(), ), } } } impl<F: PrimeField + Ord + FromUniformBytes<64>> Commit<F> for PoseidonCommitChip<F, WIDTH, RATE, L> { fn commit( &self, mut layouter: impl Layouter<F>, _gadget_config: Rc<GadgetConfig>, _constants: &HashMap<i64, CellRc<F>>, values: &Vec<CellRc<F>>, blinding: CellRc<F>, ) -> Result<Vec<CellRc<F>>, Error> { let chip = Pow5Chip::construct(self.poseidon_config.clone()); let mut hasher: MyHash<F, Pow5Chip<F, 3, 2>, P128Pow5T3Gen<F, 0>, ConstantLength<L>, 3, 2> = Sponge::new(chip, layouter.namespace(|| "sponge")) .map(|sponge| MyHash { sponge }) .unwrap(); let mut new_vals = values .iter() .map(|x| x.clone()) .chain(vec![blinding.clone()]) .collect::<Vec<_>>(); while new_vals.len() % L != 0 { new_vals.push(blinding.clone()); } for (i, value) in new_vals .iter() .map(|x| PaddedWord::Message((**x).clone())) .chain(<ConstantLength<L> as Domain<F, RATE>>::padding(L).map(PaddedWord::Padding)) .enumerate() { hasher .sponge .absorb(layouter.namespace(|| format!("absorb {}", i)), value) .unwrap(); } let outp = hasher .sponge .finish_absorbing(layouter.namespace(|| "finish absorbing")) .unwrap() .squeeze(layouter.namespace(|| "squeeze")) .unwrap(); let outp = Rc::new(outp); Ok(vec![outp]) } }
pub mod add_pairs;
pub mod adder;
pub mod bias_div_floor_relu6;
pub mod bias_div_round_relu6;
pub mod dot_prod;
pub mod gadget;
pub mod input_lookup;
pub mod max;
pub mod mul_pairs;
pub mod sqrt_big;
pub mod square;
pub mod squared_diff;
pub mod sub_pairs;
pub mod update;
pub mod var_div;
pub mod var_div_big;
pub mod var_div_big3;

// Generics
pub mod nonlinear;
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type AddPairsConfig = GadgetConfig; pub struct AddPairsChip<F: PrimeField> { config: Rc<AddPairsConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> AddPairsChip<F> { pub fn construct(config: Rc<AddPairsConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn num_cols_per_op() -> usize { 3 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = gadget_config.columns; meta.create_gate("add pair", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for i in 0..columns.len() / Self::num_cols_per_op() { let offset = i * Self::num_cols_per_op(); let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur()); let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); let res = inp1 + inp2; constraints.append(&mut vec![s.clone() * (res - outp)]) } constraints }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::AddPairs, vec![selector]); GadgetConfig { columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for AddPairsChip<F> { fn name(&self) -> String { "add pairs chip".to_string() } fn num_cols_per_op(&self) -> usize { Self::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_
inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let inp1 = &vec_inputs[0]; let inp2 = &vec_inputs[1]; assert_eq!(inp1.len(), inp2.len()); let columns = &self.config.columns; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::AddPairs).unwrap()[0]; selector.enable(region, row_offset)?; } let mut outps = vec![]; for i in 0..inp1.len() { let offset = i * self.num_cols_per_op(); let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?; let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?; let outp = inp1.value().map(|x: &F| x.to_owned()) + inp2.value().map(|x: &F| x.to_owned()); let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?; outps.push(outp); } Ok(outps) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let mut inp1 = vec_inputs[0].clone(); let mut inp2 = vec_inputs[1].clone(); let initial_len = inp1.len(); while inp1.len() % self.num_inputs_per_row() != 0 { inp1.push(zero); inp2.push(zero); } let vec_inputs = vec![inp1, inp2]; let res = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, single_inputs, )?; Ok(res[0..initial_len].to_vec()) } }
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression}, poly::Rotation, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type AdderConfig = GadgetConfig; pub struct AdderChip<F: PrimeField> { config: Rc<AdderConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> AdderChip<F> { pub fn construct(config: Rc<AdderConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = gadget_config.columns; meta.create_gate("adder gate", |meta| { let s = meta.query_selector(selector); let gate_inp = columns[0..columns.len() - 1] .iter() .map(|col| meta.query_advice(*col, Rotation::cur())) .collect::<Vec<_>>(); let gate_output = meta.query_advice(*columns.last().unwrap(), Rotation::cur()); let res = gate_inp .iter() .fold(Expression::Constant(F::ZERO), |a, b| a + b.clone()); vec![s * (res - gate_output)] }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::Adder, vec![selector]); GadgetConfig { columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for AdderChip<F> { fn name(&self) -> String { "adder".to_string() } fn num_cols_per_op(&self) -> usize { self.config.columns.len() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() - 1 } fn num_outputs_per_row(&self) -> usize { 1 } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { assert_eq!(vec_inputs.len(), 1); let inp = &vec_inputs[0]; if self.config.use_selec
tors { let selector = self.config.selectors.get(&GadgetType::Adder).unwrap()[0]; selector.enable(region, row_offset)?; } inp .iter() .enumerate() .map(|(i, cell)| cell.copy_advice(|| "", region, self.config.columns[i], row_offset)) .collect::<Result<Vec<_>, _>>()?; let e = inp.iter().fold(Value::known(F::ZERO), |a, b| { a + b.value().map(|x: &F| x.to_owned()) }); let res = region.assign_advice( || "", *self.config.columns.last().unwrap(), row_offset, || e, )?; Ok(vec![res]) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { assert_eq!(single_inputs.len(), 1); let mut inputs = vec_inputs[0].clone(); let zero = single_inputs[0].clone(); while inputs.len() % self.num_inputs_per_row() != 0 { inputs.push(&zero); } let mut outputs = self.op_aligned_rows( layouter.namespace(|| "adder forward"), &vec![inputs], single_inputs, )?; while outputs.len() != 1 { while outputs.len() % self.num_inputs_per_row() != 0 { outputs.push(zero.clone()); } let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>(); outputs = self.op_aligned_rows( layouter.namespace(|| "adder forward"), &vec![tmp], single_inputs, )?; } Ok(outputs) } }
use std::{collections::HashMap, marker::PhantomData}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression}, poly::Rotation, }; use crate::gadgets::gadget::convert_to_u64; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type BiasDivFloorRelu6Config = GadgetConfig; const SHIFT_MIN_VAL: i64 = -(1 << 30); pub struct BiasDivFloorRelu6Chip<F: PrimeField> { config: BiasDivFloorRelu6Config, _marker: PhantomData<F>, } impl<F: PrimeField> BiasDivFloorRelu6Chip<F> { pub fn construct(config: BiasDivFloorRelu6Config) -> Self { Self { config, _marker: PhantomData, } } pub fn get_map(scale_factor: u64, num_rows: i64, div_outp_min_val: i64) -> HashMap<i64, i64> { let div_val = scale_factor; let div_outp_min_val = div_outp_min_val; let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + div_outp_min_val; let val = shifted.clamp(0, 6 * div_val as i64); map.insert(i as i64, val); } map } pub fn num_cols_per_op() -> usize { 5 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.complex_selector(); let sf = Expression::Constant(F::from(gadget_config.scale_factor)); let columns = gadget_config.columns; let mod_lookup = meta.lookup_table_column(); let relu_lookup = meta.lookup_table_column(); let div_lookup = meta.lookup_table_column(); meta.create_gate("bias_mul", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for op_idx in 0..columns.len() / Self::num_cols_per_op() { let offset = op_idx * Self::num_cols_per_op(); let inp = meta.query_advice(columns[offset + 0], Rotation::cur()); let bias = meta.query_advice(columns[offset + 1], Rotation::cur()); let div_res = meta.query_advice(columns[offset + 2], Rotation::cur()); let mod_res = meta.query_advice(
columns[offset + 3], Rotation::cur()); constraints.push(s.clone() * (inp - (sf.clone() * (div_res - bias) + mod_res))); } constraints }); for op_idx in 0..columns.len() / Self::num_cols_per_op() { let offset = op_idx * Self::num_cols_per_op(); meta.lookup("bias_div_relu6 lookup", |meta| { let s = meta.query_selector(selector); let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur()); vec![(s.clone() * mod_res.clone(), mod_lookup)] }); meta.lookup("bias_div_relu6 lookup", |meta| { let s = meta.query_selector(selector); let div = meta.query_advice(columns[offset + 2], Rotation::cur()); let outp = meta.query_advice(columns[offset + 4], Rotation::cur()); let div_outp_min_val = Expression::Constant(F::from((-SHIFT_MIN_VAL) as u64)); vec![ (s.clone() * outp, relu_lookup), (s * (div + div_outp_min_val), div_lookup), ] }); } let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::BiasDivFloorRelu6, vec![selector]); let mut tables = gadget_config.tables; tables.insert( GadgetType::BiasDivFloorRelu6, vec![mod_lookup, relu_lookup, div_lookup], ); let mut maps = gadget_config.maps; let relu_map = Self::get_map( gadget_config.scale_factor, gadget_config.num_rows as i64, gadget_config.div_outp_min_val, ); maps.insert(GadgetType::BiasDivFloorRelu6, vec![relu_map]); GadgetConfig { columns, selectors, tables, maps, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for BiasDivFloorRelu6Chip<F> { fn name(&self) -> String { "BiasDivRelu6".to_string() } fn num_cols_per_op(&self) -> usize { Self::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.num_inputs_per_row() } fn op_r
ow_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let div_val = self.config.scale_factor as i64; let div_outp_min_val_i64 = -self.config.div_outp_min_val; let div_inp_min_val_pos_i64 = -SHIFT_MIN_VAL; let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64); let inp = &vec_inputs[0]; let bias = &vec_inputs[1]; assert_eq!(inp.len(), bias.len()); assert_eq!(inp.len() % self.num_inputs_per_row(), 0); let relu_map = &self .config .maps .get(&GadgetType::BiasDivFloorRelu6) .unwrap()[0]; if self.config.use_selectors { let selector = self .config .selectors .get(&GadgetType::BiasDivFloorRelu6) .unwrap()[0]; selector.enable(region, row_offset)?; } let mut outp_cells = vec![]; for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() { let offset = i * self.num_cols_per_op(); let inp_f = inp.value().map(|x: &F| x.to_owned()); let bias_f = bias.value().map(|x: &F| { let a = *x + div_inp_min_val_pos; let a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64; a }); let div_mod_res = inp_f.map(|x: F| { let x_pos = x + div_inp_min_val_pos; let inp = convert_to_u64(&x_pos); let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 / div_val); let mod_res = inp as i64 % div_val; (div_res, mod_res) }); let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f; let mod_res = div_mod_res.map(|x: (i64, i64)| x.1); let outp = div_res.map(|x: i64| { let mut x_pos = x - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { println!("x: {}, x_pos: {}", x, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap();
F::from(*outp_val as u64) }); inp.copy_advice(|| "", region, self.config.columns[offset + 0], row_offset)?; bias.copy_advice(|| "", region, self.config.columns[offset + 1], row_offset)?; let div_res_cell = region .assign_advice( || "div_res", self.config.columns[offset + 2], row_offset, || { div_res.map(|x: i64| { F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64) }) }, ) .unwrap(); let _mod_res_cell = region .assign_advice( || "mod_res", self.config.columns[offset + 3], row_offset, || mod_res.map(|x: i64| F::from(x as u64)), ) .unwrap(); let outp_cell = region .assign_advice( || "outp", self.config.columns[offset + 4], row_offset, || outp.map(|x: F| x.to_owned()), ) .unwrap(); outp_cells.push(outp_cell); outp_cells.push(div_res_cell); } Ok(outp_cells) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let mut inps = vec_inputs[0].clone(); let mut biases = vec_inputs[1].clone(); let default = biases[0].clone(); while inps.len() % self.num_inputs_per_row() != 0 { inps.push(&default); biases.push(&default); } let res = self.op_aligned_rows( layouter.namespace(|| "bias_div_relu6"), &vec![inps, biases], single_inputs, )?; Ok(res) } }
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression}, poly::Rotation, }; use crate::gadgets::gadget::convert_to_u64; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type BiasDivRoundRelu6Config = GadgetConfig; const NUM_COLS_PER_OP: usize = 5; pub struct BiasDivRoundRelu6Chip<F: PrimeField> { config: Rc<BiasDivRoundRelu6Config>, _marker: PhantomData<F>, } impl<F: PrimeField> BiasDivRoundRelu6Chip<F> { pub fn construct(config: Rc<BiasDivRoundRelu6Config>) -> Self { Self { config, _marker: PhantomData, } } pub fn get_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let div_val = scale_factor; let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let val = shifted.clamp(0, 6 * div_val as i64); map.insert(i as i64, val); } map } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.complex_selector(); let sf = Expression::Constant(F::from(gadget_config.scale_factor)); let two = Expression::Constant(F::from(2)); let columns = gadget_config.columns; let mut tables = gadget_config.tables; let div_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0]; let relu_lookup = meta.lookup_table_column(); meta.create_gate("bias_mul", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for op_idx in 0..columns.len() / NUM_COLS_PER_OP { let offset = op_idx * NUM_COLS_PER_OP; let inp = meta.query_advice(columns[offset + 0], Rotation::cur()); let bias = meta.query_advice(columns[offset + 1], Rotation::cur()); let div_res = meta.query_advice(columns[offset + 2], Rotation::cur()); let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());
constraints.push( s.clone() * (two.clone() * inp + sf.clone() - (sf.clone() * two.clone() * (div_res - bias) + mod_res)), ); } constraints }); for op_idx in 0..columns.len() / NUM_COLS_PER_OP { let offset = op_idx * NUM_COLS_PER_OP; meta.lookup("bias_div_relu6 lookup", |meta| { let s = meta.query_selector(selector); let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur()); vec![(s.clone() * (two.clone() * sf.clone() - mod_res), div_lookup)] }); meta.lookup("bias_div_relu6 lookup", |meta| { let s = meta.query_selector(selector); let div = meta.query_advice(columns[offset + 2], Rotation::cur()); let outp = meta.query_advice(columns[offset + 4], Rotation::cur()); let div_outp_min_val = gadget_config.div_outp_min_val; let div_outp_min_val = Expression::Constant(F::from((-div_outp_min_val) as u64)); vec![ (s.clone() * (div + div_outp_min_val), div_lookup), (s.clone() * outp, relu_lookup), ] }); } let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::BiasDivRoundRelu6, vec![selector]); tables.insert(GadgetType::BiasDivRoundRelu6, vec![relu_lookup]); let mut maps = gadget_config.maps; let relu_map = Self::get_map( gadget_config.scale_factor, gadget_config.min_val, gadget_config.num_rows as i64, ); maps.insert(GadgetType::BiasDivRoundRelu6, vec![relu_map]); GadgetConfig { columns, selectors, tables, maps, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for BiasDivRoundRelu6Chip<F> { fn name(&self) -> String { "BiasDivRelu6".to_string() } fn num_cols_per_op(&self) -> usize { NUM_COLS_PER_OP } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / NUM_COLS_PER_OP } fn num_outputs_per_row(&self) -> usize { self.num_in
puts_per_row() * 2 } fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Error> { let map = &self.config.maps[&GadgetType::BiasDivRoundRelu6][0]; let relu_lookup = self.config.tables[&GadgetType::BiasDivRoundRelu6][0]; layouter .assign_table( || "bdr round div/relu lookup", |mut table| { for i in 0..self.config.num_rows { let i = i as i64; let val = map.get(&i).unwrap(); table .assign_cell( || "relu lookup", relu_lookup, i as usize, || Value::known(F::from(*val as u64)), ) .unwrap(); } Ok(()) }, ) .unwrap(); Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let div_val = self.config.scale_factor as i64; let div_outp_min_val_i64 = self.config.div_outp_min_val; let div_inp_min_val_pos_i64 = -self.config.shift_min_val; let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64); let inp = &vec_inputs[0]; let bias = &vec_inputs[1]; assert_eq!(inp.len(), bias.len()); assert_eq!(inp.len() % self.num_inputs_per_row(), 0); let relu_map = &self .config .maps .get(&GadgetType::BiasDivRoundRelu6) .unwrap()[0]; if self.config.use_selectors { let selector = self .config .selectors .get(&GadgetType::BiasDivRoundRelu6) .unwrap()[0]; selector.enable(region, row_offset).unwrap(); } let mut outp_cells = vec![]; for (i, (inp, bias)) in inp.iter().zip(bias.iter()).enumerate() { let offset = i * NUM_COLS_PER_OP; let inp_f = inp.value().map(|x: &F| x.to_owned()); let bias_f = bias.value().map(|x: &F| { let a = *x + div_inp_min_val_pos; l
et a = convert_to_u64(&a) as i64 - div_inp_min_val_pos_i64; a }); let div_mod_res = inp_f.map(|x: F| { let x_pos = x + div_inp_min_val_pos; let inp = convert_to_u64(&x_pos) as i64; let div_inp = 2 * inp + div_val; let div_res = div_inp / (2 * div_val) - div_inp_min_val_pos_i64 / div_val; let mod_res = div_inp % (2 * div_val); (div_res, mod_res) }); let div_res = div_mod_res.map(|x: (i64, i64)| x.0) + bias_f; let mod_res = div_mod_res.map(|x: (i64, i64)| x.1); let outp = div_res.map(|x: i64| { let mut x_pos = x - div_outp_min_val_i64; if !relu_map.contains_key(&(x_pos)) { println!("x: {}, x_pos: {}", x, x_pos); x_pos = 0; } let outp_val = relu_map.get(&(x_pos)).unwrap(); F::from(*outp_val as u64) }); inp .copy_advice(|| "", region, self.config.columns[offset + 0], row_offset) .unwrap(); bias .copy_advice(|| "", region, self.config.columns[offset + 1], row_offset) .unwrap(); let div_res_cell = region .assign_advice( || "div_res", self.config.columns[offset + 2], row_offset, || { div_res.map(|x: i64| { F::from((x - div_outp_min_val_i64) as u64) - F::from(-div_outp_min_val_i64 as u64) }) }, ) .unwrap(); let _mod_res_cell = region .assign_advice( || "mod_res", self.config.columns[offset + 3], row_offset, || mod_res.map(|x: i64| F::from(x as u64)), ) .unwrap(); let outp_cell = region .assign_advice( || "outp", self.config.columns[offset + 4], row_offset, || outp.map(|x: F| x.to_owned()), ) .unwrap(); outp_cells.push(outp_cell); outp_cells.push(div_res_cell); } Ok(outp_cells) } fn forward( &self, mut layouter:
impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let mut inps = vec_inputs[0].clone(); let mut biases = vec_inputs[1].clone(); let initial_len = inps.len(); let default = biases[0].clone(); while inps.len() % self.num_inputs_per_row() != 0 { inps.push(&default); biases.push(&default); } let res = self .op_aligned_rows( layouter.namespace(|| "bias_div_relu6"), &vec![inps, biases], single_inputs, ) .unwrap(); Ok(res[0..initial_len * 2].to_vec()) } }
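// The gate above enforces 2*inp + sf == 2*sf*(div - bias) + mod for each op, so with
// mod range-checked the div column holds bias + round(inp / sf); a joint lookup then
// ties outp to the ReLU6 table entry for the shifted div. Each op emits the pair
// (outp, div_res), which is why num_outputs_per_row() is twice num_inputs_per_row().
//
// Plain-integer sketch of the rounded-division witness (illustrative only; the real
// witness additionally shifts by div_inp_min_val_pos to keep field values non-negative):
#[allow(dead_code)]
fn round_div_witness(inp: i64, sf: i64) -> (i64, i64) {
    // Returns (div, mod) with 2*inp + sf == 2*sf*div + mod and 0 <= mod < 2*sf.
    let num = 2 * inp + sf;
    (num.div_euclid(2 * sf), num.rem_euclid(2 * sf))
}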
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{Advice, Column, ConstraintSystem, Error, Expression}, poly::Rotation, }; use crate::gadgets::adder::AdderChip; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type DotProductConfig = GadgetConfig; pub struct DotProductChip<F: PrimeField> { config: Rc<DotProductConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> DotProductChip<F> { pub fn construct(config: Rc<DotProductConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn get_input_columns(config: &GadgetConfig) -> Vec<Column<Advice>> { let num_inputs = (config.columns.len() - 1) / 2; config.columns[0..num_inputs].to_vec() } pub fn get_weight_columns(config: &GadgetConfig) -> Vec<Column<Advice>> { let num_inputs = (config.columns.len() - 1) / 2; config.columns[num_inputs..config.columns.len() - 1].to_vec() } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = &gadget_config.columns; meta.create_gate("dot product gate", |meta| { let s = meta.query_selector(selector); let gate_inp = DotProductChip::<F>::get_input_columns(&gadget_config) .iter() .map(|col| meta.query_advice(*col, Rotation::cur())) .collect::<Vec<_>>(); let gate_weights = DotProductChip::<F>::get_weight_columns(&gadget_config) .iter() .map(|col| meta.query_advice(*col, Rotation::cur())) .collect::<Vec<_>>(); let gate_output = meta.query_advice(columns[columns.len() - 1], Rotation::cur()); let res = gate_inp .iter() .zip(gate_weights) .map(|(a, b)| a.clone() * b.clone()) .fold(Expression::Constant(F::ZERO), |a, b| a + b); vec![s * (res - gate_output)] }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::DotProduct, ve
c![selector]); GadgetConfig { columns: gadget_config.columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for DotProductChip<F> { fn name(&self) -> String { "dot product".to_string() } fn num_cols_per_op(&self) -> usize { self.config.columns.len() } fn num_inputs_per_row(&self) -> usize { (self.config.columns.len() - 1) / 2 } fn num_outputs_per_row(&self) -> usize { 1 } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { assert_eq!(vec_inputs.len(), 2); let inp = &vec_inputs[0]; let weights = &vec_inputs[1]; assert_eq!(inp.len(), weights.len()); assert_eq!(inp.len(), self.num_inputs_per_row()); let zero = &single_inputs[0]; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::DotProduct).unwrap()[0]; selector.enable(region, row_offset).unwrap(); } let inp_cols = DotProductChip::<F>::get_input_columns(&self.config); inp .iter() .enumerate() .map(|(i, cell)| cell.copy_advice(|| "", region, inp_cols[i], row_offset)) .collect::<Result<Vec<_>, _>>() .unwrap(); let weight_cols = DotProductChip::<F>::get_weight_columns(&self.config); weights .iter() .enumerate() .map(|(i, cell)| cell.copy_advice(|| "", region, weight_cols[i], row_offset)) .collect::<Result<Vec<_>, _>>() .unwrap(); if self.config.columns.len() % 2 == 0 { zero .copy_advice( || "", region, self.config.columns[self.config.columns.len() - 2], row_offset, ) .unwrap(); } let e = inp .iter() .zip(weights.iter()) .map(|(a, b)| a.value().map(|x: &F| *x) * b.value()) .reduce(|a, b| a + b) .unwrap(); let res = region .assign_adv
ice( || "", self.config.columns[self.config.columns.len() - 1], row_offset, || e, ) .unwrap(); Ok(vec![res]) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { assert_eq!(vec_inputs.len(), 2); assert_eq!(single_inputs.len(), 1); let zero = &single_inputs[0]; let mut inputs = vec_inputs[0].clone(); let mut weights = vec_inputs[1].clone(); while inputs.len() % self.num_inputs_per_row() != 0 { inputs.push(&zero); weights.push(&zero); } let outputs = layouter .assign_region( || "dot prod rows", |mut region| { let mut outputs = vec![]; for i in 0..inputs.len() / self.num_inputs_per_row() { let inp = inputs[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec(); let weights = weights[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec(); let res = self .op_row_region(&mut region, i, &vec![inp, weights], &vec![zero.clone()]) .unwrap(); outputs.push(res[0].clone()); } Ok(outputs) }, ) .unwrap(); let adder_chip = AdderChip::<F>::construct(self.config.clone()); let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>(); Ok( adder_chip .forward( layouter.namespace(|| "dot prod adder"), &vec![tmp], single_inputs, ) .unwrap(), ) } }
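// Column layout for each dot-product row: the first (N-1)/2 advice columns hold the
// inputs, the next (N-1)/2 hold the weights, and the final column holds the row's
// partial sum, with the gate checking sum_i(inp_i * w_i) == output. forward() pads
// both vectors with the `zero` single input up to a multiple of num_inputs_per_row(),
// lays out one partial dot product per row, and reduces the partial sums with AdderChip.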
use std::{ collections::{BTreeSet, HashMap}, sync::Arc, }; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::group::ff::PrimeField, plonk::{Advice, Column, Error, Fixed, Selector, TableColumn}, }; use num_bigint::{BigUint, ToBigUint}; use num_traits::cast::ToPrimitive;
// Gadget identifiers, used as HashMap/BTreeSet keys for the per-gadget selectors, lookup tables, and witness maps.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum GadgetType { AddPairs, Adder, BiasDivRoundRelu6, BiasDivFloorRelu6, DotProduct, Exp, Logistic, Max, Pow, Relu, Rsqrt, Sqrt, SqrtBig, Square, SquaredDiff, SubPairs, Tanh, MulPairs, VarDivRound, VarDivRoundBig, VarDivRoundBig3, Packer, InputLookup, Update, } pub
struct GadgetConfig { pub used_gadgets: Arc<BTreeSet<GadgetType>>, pub columns: Vec<Column<Advice>>, pub fixed_columns: Vec<Column<Fixed>>, pub selectors: HashMap<GadgetType, Vec<Selector>>, pub tables: HashMap<GadgetType, Vec<TableColumn>>, pub maps: HashMap<GadgetType, Vec<HashMap<i64, i64>>>, pub scale_factor: u64, pub shift_min_val: i64, pub num_rows: usize, pub num_cols: usize, pub k: usize, pub eta: f64, pub min_val: i64, pub max_val: i64, pub div_outp_min_val: i64, pub use_selectors: bool, pub commit_before: Vec<Vec<i64>>, pub commit_after: Vec<Vec<i64>>, pub num_bits_per_elem: i64, } pub fn convert_to_u64<F: PrimeField>(x: &F) -> u64 { let big = BigUint::from_bytes_le(x.to_repr().as_ref()); let big_digits = big.to_u64_digits(); if big_digits.len() > 2 { println!("big_digits: {:?}", big_digits); } if big_digits.len() == 1 { big_digits[0] as u64 } else if big_digits.len() == 0 { 0 } else { panic!(); } } pub fn convert_to_u128<F: PrimeField>(x: &F) -> u128 { let big = BigUint::from_bytes_le(x.to_repr().as_ref()); big.to_biguint().unwrap().to_u128().unwrap() } pub trait Gadget<F: PrimeField> { fn name(&self) -> String; fn num_cols_per_op(&self) -> usize; fn num_inputs_per_row(&self) -> usize; fn num_outputs_per_row(&self) -> usize; fn load_lookups(&self, _layouter: impl Layouter<F>) -> Result<(), Error> { Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error>; fn op_aligned_rows( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { for inp in vec_inputs.iter() { assert_eq!(inp.len() % self.num_inputs_per_row(), 0); } let outputs = layouter.assign_region( || format!("gad
get {}", self.name()), |mut region| { let mut outputs = vec![]; for i in 0..vec_inputs[0].len() / self.num_inputs_per_row() { let mut vec_inputs_row = vec![]; for inp in vec_inputs.iter() { vec_inputs_row.push( inp[i * self.num_inputs_per_row()..(i + 1) * self.num_inputs_per_row()].to_vec(), ); } let row_outputs = self.op_row_region(&mut region, i, &vec_inputs_row, &single_inputs)?; assert_eq!(row_outputs.len(), self.num_outputs_per_row()); outputs.extend(row_outputs); } Ok(outputs) }, )?; Ok(outputs) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), vec_inputs, single_inputs, ) } }
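// Shared plumbing: op_aligned_rows() opens one region, asserts each input vector's
// length is a multiple of num_inputs_per_row(), and calls op_row_region() once per row
// offset, concatenating the per-row outputs. convert_to_u64()/convert_to_u128() read a
// field element back as a small non-negative integer from its little-endian repr;
// convert_to_u64 panics when the value needs more than one 64-bit limb, which is why
// callers first add a positive shift (e.g. div_inp_min_val_pos) before converting.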
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; pub struct InputLookupChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> InputLookupChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let lookup = meta.lookup_table_column(); let mut tables = gadget_config.tables; tables.insert(GadgetType::InputLookup, vec![lookup]); GadgetConfig { tables, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for InputLookupChip<F> { fn load_lookups(&self, mut layouter: impl Layouter<F>) -> Result<(), Error> { let lookup = self.config.tables[&GadgetType::InputLookup][0]; layouter .assign_table( || "input lookup", |mut table| { for i in 0..self.config.num_rows as i64 { table .assign_cell( || "mod lookup", lookup, i as usize, || Value::known(F::from(i as u64)), ) .unwrap(); } Ok(()) }, ) .unwrap(); Ok(()) } fn name(&self) -> String { panic!("InputLookupChip should not be called directly") } fn num_cols_per_op(&self) -> usize { panic!("InputLookupChip should not be called directly") } fn num_inputs_per_row(&self) -> usize { panic!("InputLookupChip should not be called directly") } fn num_outputs_per_row(&self) -> usize { panic!("InputLookupChip should not be called directly") } fn op_row_region( &self, _region: &mut Region<F>, _row_offset: usize, _vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>
>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { panic!("InputLookupChip should not be called directly") } }
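// InputLookupChip only materializes the shared range-check table holding 0..num_rows;
// the other gadgets (Max, SqrtBig, VarDivRound, Update, and the non-linearities) fetch
// it via GadgetType::InputLookup to range-check shifted values, so this chip never
// assigns any advice rows of its own.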
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, }; use crate::gadgets::gadget::convert_to_u64; use super::gadget::{Gadget, GadgetConfig, GadgetType}; pub struct MaxChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> MaxChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn num_cols_per_op() -> usize { 3 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.complex_selector(); let columns = gadget_config.columns; let tables = gadget_config.tables; let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0]; meta.create_gate("max arithmetic", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for i in 0..columns.len() / Self::num_cols_per_op() { let offset = i * Self::num_cols_per_op(); let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur()); let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); constraints.push(s.clone() * (inp1 - outp.clone()) * (inp2 - outp)) } constraints }); for idx in 0..columns.len() / Self::num_cols_per_op() { meta.lookup("max inp1", |meta| { let s = meta.query_selector(selector); let offset = idx * Self::num_cols_per_op(); let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); vec![(s * (outp - inp1), inp_lookup)] }); meta.lookup("max inp2", |meta| { let s = meta.query_selector(selector); let offset = idx * Self::num_cols_per_op(); let inp2 = meta.query_advice(c
olumns[offset + 1], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); vec![(s * (outp - inp2), inp_lookup)] }); } let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::Max, vec![selector]); GadgetConfig { columns, selectors, tables, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for MaxChip<F> { fn name(&self) -> String { "max".to_string() } fn num_cols_per_op(&self) -> usize { 3 } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() * 2 } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { assert_eq!(vec_inputs.len(), 1); let inp = &vec_inputs[0]; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::Max).unwrap()[0]; selector.enable(region, row_offset)?; } let min_val_pos = F::from((-self.config.shift_min_val) as u64); let mut outp = vec![]; let chunks: Vec<&[&AssignedCell<F, F>]> = inp.chunks(self.num_outputs_per_row()).collect(); let i1 = chunks[0]; let i2 = chunks[1]; for (idx, (inp1, inp2)) in i1.iter().zip(i2.iter()).enumerate() { let offset = idx * self.num_cols_per_op(); inp1 .copy_advice(|| "", region, self.config.columns[offset + 0], row_offset) .unwrap(); inp2 .copy_advice(|| "", region, self.config.columns[offset + 1], row_offset) .unwrap(); let max = inp1.value().zip(inp2.value()).map(|(a, b)| { let a = convert_to_u64(&(*a + min_val_pos)); let b = convert_to_u64(&(*b + min_val_pos)); let max = a.max(b); let max = F::from(max) - min_val_pos; max });
let res = region .assign_advice(|| "", self.config.columns[offset + 2], row_offset, || max) .unwrap(); outp.push(res); } Ok(outp) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let mut inputs = vec_inputs[0].clone(); let first = inputs[0]; while inputs.len() % self.num_inputs_per_row() != 0 { inputs.push(first); } let num_iters = inputs.len().div_ceil(self.num_inputs_per_row()) + self.num_inputs_per_row(); let mut outputs = self.op_aligned_rows( layouter.namespace(|| "max forward"), &vec![inputs], single_inputs, )?; for _ in 0..num_iters { while outputs.len() % self.num_inputs_per_row() != 0 { outputs.push(first.clone()); } let tmp = outputs.iter().map(|x| x).collect::<Vec<_>>(); outputs = self.op_aligned_rows( layouter.namespace(|| "max forward"), &vec![tmp], single_inputs, )?; } outputs = vec![outputs.into_iter().next().unwrap()]; Ok(outputs) } }
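// The max gate works pairwise: (inp1 - outp) * (inp2 - outp) == 0 forces outp to equal
// one of the two inputs, and the two lookups range-check outp - inp1 and outp - inp2
// against the InputLookup table, so outp is at least as large as both, i.e. the pairwise
// maximum. forward() feeds the outputs back in repeatedly (padding with the first
// element) to fold the vector down toward a single running maximum, then returns the
// first remaining cell.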
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type MulPairsConfig = GadgetConfig; pub struct MulPairsChip<F: PrimeField> { config: Rc<MulPairsConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> MulPairsChip<F> { pub fn construct(config: Rc<MulPairsConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn num_cols_per_op() -> usize { 3 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = gadget_config.columns; meta.create_gate("mul pair", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for i in 0..columns.len() / Self::num_cols_per_op() { let offset = i * Self::num_cols_per_op(); let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur()); let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); let res = inp1 * inp2; constraints.append(&mut vec![s.clone() * (res - outp)]) } constraints }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::MulPairs, vec![selector]); GadgetConfig { columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for MulPairsChip<F> { fn name(&self) -> String { "MulPairs".to_string() } fn num_cols_per_op(&self) -> usize { Self::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inp
uts: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let inp1 = &vec_inputs[0]; let inp2 = &vec_inputs[1]; assert_eq!(inp1.len(), inp2.len()); let columns = &self.config.columns; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::MulPairs).unwrap()[0]; selector.enable(region, row_offset)?; } let mut outps = vec![]; for i in 0..inp1.len() { let offset = i * self.num_cols_per_op(); let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?; let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?; let outp = inp1.value().map(|x: &F| x.to_owned()) * inp2.value().map(|x: &F| x.to_owned()); let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?; outps.push(outp); } Ok(outps) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let mut inp1 = vec_inputs[0].clone(); let mut inp2 = vec_inputs[1].clone(); let initial_len = inp1.len(); while inp1.len() % self.num_inputs_per_row() != 0 { inp1.push(zero); inp2.push(zero); } let vec_inputs = vec![inp1, inp2]; let res = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, single_inputs, )?; Ok(res[0..initial_len].to_vec()) } }
pub mod exp; pub mod logistic; pub mod non_linearity; pub mod pow; pub mod relu; pub mod rsqrt; pub mod sqrt; pub mod tanh;
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; type ExpGadgetConfig = GadgetConfig; pub struct ExpGadgetChip<F: PrimeField> { config: Rc<ExpGadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> ExpGadgetChip<F> { pub fn construct(config: Rc<ExpGadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <ExpGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Exp) } } impl<F: PrimeField> NonLinearGadget<F> for ExpGadgetChip<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let x = (shifted as f64) / (scale_factor as f64); let exp = x.exp(); let exp = (exp * ((scale_factor * scale_factor) as f64)).round() as i64; map.insert(i as i64, exp); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Exp).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Exp).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for ExpGadgetChip<F> { fn name(&self) -> String { "Exp".to_string() } fn num_cols_per_op(&self) -> usize { <ExpGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> { NonLinearGadget::load_lookups(self, layouter, self.c
onfig.clone(), GadgetType::Exp)?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
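// Note the scaling convention: ExpGadgetChip's table stores round(exp(x) * scale_factor^2),
// whereas the other non-linearities in this module (logistic, pow, rsqrt, sqrt, tanh)
// store their outputs scaled by a single factor of scale_factor.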
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; pub struct LogisticGadgetChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> LogisticGadgetChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <LogisticGadgetChip<F> as NonLinearGadget<F>>::configure( meta, gadget_config, GadgetType::Logistic, ) } } impl<F: PrimeField> NonLinearGadget<F> for LogisticGadgetChip<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let x = (shifted as f64) / (scale_factor as f64); let logistic = 1. / (1. + (-x).exp()); let logistic = (logistic * ((scale_factor) as f64)).round() as i64; map.insert(i as i64, logistic); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Logistic).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Logistic).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for LogisticGadgetChip<F> { fn name(&self) -> String { "LogisticChip".to_string() } fn num_cols_per_op(&self) -> usize { <LogisticGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> {
NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Logistic)?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
use std::{collections::HashMap, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region, Value}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression, Selector}, poly::Rotation, }; use crate::gadgets::gadget::convert_to_u128; use super::super::gadget::Gadget; use super::super::gadget::{GadgetConfig, GadgetType}; const NUM_COLS_PER_OP: usize = 2; pub trait NonLinearGadget<F: PrimeField>: Gadget<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64>; fn get_map(&self) -> &HashMap<i64, i64>; fn get_selector(&self) -> Selector; fn num_cols_per_op() -> usize { NUM_COLS_PER_OP } fn configure( meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig, gadget_type: GadgetType, ) -> GadgetConfig { let selector = meta.complex_selector(); let columns = gadget_config.columns; let mut tables = gadget_config.tables; let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0]; let outp_lookup = meta.lookup_table_column(); for op_idx in 0..columns.len() / NUM_COLS_PER_OP { let offset = op_idx * NUM_COLS_PER_OP; meta.lookup("non-linear lookup", |meta| { let s = meta.query_selector(selector); let inp = meta.query_advice(columns[offset + 0], Rotation::cur()); let outp = meta.query_advice(columns[offset + 1], Rotation::cur()); let shift_val = gadget_config.min_val; let shift_val_pos = Expression::Constant(F::from((-shift_val) as u64)); vec![ (s.clone() * (inp + shift_val_pos), inp_lookup), (s.clone() * outp, outp_lookup), ] }); } let mut selectors = gadget_config.selectors; selectors.insert(gadget_type, vec![selector]); tables.insert(gadget_type, vec![inp_lookup, outp_lookup]); let mut maps = gadget_config.maps; let non_linear_map = Self::generate_map( gadget_config.scale_factor, gadget_config.min_val, gadget_config.num_rows as i64, ); maps
.insert(gadget_type, vec![non_linear_map]); GadgetConfig { columns, selectors, tables, maps, ..gadget_config } } fn load_lookups( &self, mut layouter: impl Layouter<F>, config: Rc<GadgetConfig>, gadget_type: GadgetType, ) -> Result<(), Error> { let map = self.get_map(); let table_col = config.tables.get(&gadget_type).unwrap()[1]; let shift_pos_i64 = -config.shift_min_val; let shift_pos = F::from(shift_pos_i64 as u64); layouter.assign_table( || "non linear table", |mut table| { for i in 0..config.num_rows { let i = i as i64; let tmp = *map.get(&i).unwrap(); let val = if i == 0 { F::ZERO } else { if tmp >= 0 { F::from(tmp as u64) } else { let tmp = tmp + shift_pos_i64; F::from(tmp as u64) - shift_pos } }; table.assign_cell( || "non linear cell", table_col, i as usize, || Value::known(val), )?; } Ok(()) }, )?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, gadget_config: Rc<GadgetConfig>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let columns = &gadget_config.columns; let inp = &vec_inputs[0]; let map = self.get_map(); let shift_val_pos_i64 = -gadget_config.shift_min_val; let shift_val_pos = F::from(shift_val_pos_i64 as u64); let min_val = gadget_config.min_val; if gadget_config.use_selectors { let selector = self.get_selector(); selector.enable(region, row_offset)?; } let mut outps = vec![]; for i in 0..inp.len() { let offset = i * 2; inp[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?; let outp = inp[i].value().map(|x: &F| { l
et pos = convert_to_u128(&(*x + shift_val_pos)) as i128 - shift_val_pos_i64 as i128; let x = pos as i64 - min_val; let val = *map.get(&x).unwrap(); if x == 0 { F::ZERO } else { if val >= 0 { F::from(val as u64) } else { let val_pos = val + shift_val_pos_i64; F::from(val_pos as u64) - F::from(shift_val_pos_i64 as u64) } } }); let outp = region.assign_advice(|| "nonlinearity", columns[offset + 1], row_offset, || outp)?; outps.push(outp); } Ok(outps) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let inp_len = vec_inputs[0].len(); let mut inp = vec_inputs[0].clone(); while inp.len() % self.num_inputs_per_row() != 0 { inp.push(zero); } let vec_inputs = vec![inp]; let outp = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, &single_inputs, )?; Ok(outp[0..inp_len].to_vec()) } }
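// Every NonLinearGadget shares this lookup scheme: the selector-gated pair
// (inp - min_val, outp) must appear in the (InputLookup, outp_lookup) table pair, and
// outp_lookup row i holds the precomputed non-linearity evaluated at the fixed-point
// input (i + min_val) / scale_factor. The chips in this module (Exp and Logistic above,
// Pow, Relu, Rsqrt, Sqrt, and Tanh below) differ essentially only in generate_map().
//
// Standalone illustration (not used by the circuit) of how a table entry is derived,
// mirroring LogisticGadgetChip::generate_map above:
#[allow(dead_code)]
fn logistic_table_entry(i: i64, min_val: i64, scale_factor: u64) -> i64 {
    // Row index i represents the fixed-point value (i + min_val) / scale_factor.
    let x = (i + min_val) as f64 / scale_factor as f64;
    let y = 1.0 / (1.0 + (-x).exp());
    // The output is re-quantized by the same scale factor.
    (y * scale_factor as f64).round() as i64
}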
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; pub struct PowGadgetChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> PowGadgetChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <PowGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Pow) } } impl<F: PrimeField> NonLinearGadget<F> for PowGadgetChip<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let power = 3.; let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let x = (shifted as f64) / (scale_factor as f64); let y = x.powf(power); let y = (y * ((scale_factor) as f64)).round() as i64; map.insert(i as i64, y); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Pow).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Pow).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for PowGadgetChip<F> { fn name(&self) -> String { "PowGadgetChip".to_string() } fn num_cols_per_op(&self) -> usize { <PowGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> { NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::P
ow)?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; pub struct ReluChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> ReluChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <ReluChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Relu) } } impl<F: PrimeField> NonLinearGadget<F> for ReluChip<F> { fn generate_map(_scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let relu = shifted.max(0); map.insert(i as i64, relu); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Relu).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Relu).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for ReluChip<F> { fn name(&self) -> String { "Relu".to_string() } fn num_cols_per_op(&self) -> usize { <ReluChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> { NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Relu)?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inp
uts: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; pub struct RsqrtGadgetChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> RsqrtGadgetChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <RsqrtGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Rsqrt) } } impl<F: PrimeField> NonLinearGadget<F> for RsqrtGadgetChip<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let x = (shifted as f64) / (scale_factor as f64); let sqrt = x.sqrt(); let rsqrt = 1.0 / sqrt; let rsqrt = (rsqrt * (scale_factor as f64)).round() as i64; map.insert(i as i64, rsqrt); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Rsqrt).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Rsqrt).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for RsqrtGadgetChip<F> { fn name(&self) -> String { "RsqrtGadget".to_string() } fn num_cols_per_op(&self) -> usize { <RsqrtGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> { NonLinearGadget::load_lookups(self, layouter, self
.config.clone(), GadgetType::Rsqrt)?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; pub struct SqrtGadgetChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> SqrtGadgetChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <SqrtGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Sqrt) } } impl<F: PrimeField> NonLinearGadget<F> for SqrtGadgetChip<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let x = (shifted as f64) / (scale_factor as f64); let sqrt = x.sqrt(); let sqrt = (sqrt * (scale_factor as f64)).round() as i64; map.insert(i as i64, sqrt); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Sqrt).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Sqrt).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for SqrtGadgetChip<F> { fn name(&self) -> String { "SqrtGadget".to_string() } fn num_cols_per_op(&self) -> usize { <SqrtGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> { NonLinearGadget::load_lookups(self, layouter, self.config.clone(), GadgetType::Sqrt)?; Ok
(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
use std::{collections::HashMap, marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, }; use super::{ super::gadget::{Gadget, GadgetConfig, GadgetType}, non_linearity::NonLinearGadget, }; pub struct TanhGadgetChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> TanhGadgetChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { <TanhGadgetChip<F> as NonLinearGadget<F>>::configure(meta, gadget_config, GadgetType::Tanh) } } impl<F: PrimeField> NonLinearGadget<F> for TanhGadgetChip<F> { fn generate_map(scale_factor: u64, min_val: i64, num_rows: i64) -> HashMap<i64, i64> { let scale_factor = scale_factor as f64; let mut map = HashMap::new(); for i in 0..num_rows { let shifted = i + min_val; let x = (shifted as f64) / scale_factor; let y = x.tanh(); let y = (y * scale_factor).round() as i64; map.insert(i as i64, y); } map } fn get_map(&self) -> &HashMap<i64, i64> { &self.config.maps.get(&GadgetType::Tanh).unwrap()[0] } fn get_selector(&self) -> halo2_proofs::plonk::Selector { self.config.selectors.get(&GadgetType::Tanh).unwrap()[0] } } impl<F: PrimeField> Gadget<F> for TanhGadgetChip<F> { fn name(&self) -> String { "TanhGadgetChip".to_string() } fn num_cols_per_op(&self) -> usize { <TanhGadgetChip<F> as NonLinearGadget<F>>::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn load_lookups(&self, layouter: impl Layouter<F>) -> Result<(), Error> { NonLinearGadget::load_lookups(self, layouter, self.config.clone(), Gadget
Type::Tanh)?; Ok(()) } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::op_row_region( self, region, row_offset, vec_inputs, single_inputs, self.config.clone(), ) } fn forward( &self, layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { NonLinearGadget::forward(self, layouter, vec_inputs, single_inputs) } }
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error, Expression}, poly::Rotation, }; use crate::gadgets::gadget::convert_to_u64; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type SqrtBigConfig = GadgetConfig; pub struct SqrtBigChip<F: PrimeField> { config: Rc<SqrtBigConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> SqrtBigChip<F> { pub fn construct(config: Rc<SqrtBigConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn num_cols_per_op() -> usize { 3 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.complex_selector(); let two = Expression::Constant(F::from(2)); let columns = gadget_config.columns; let tables = gadget_config.tables; let inp_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0]; meta.create_gate("sqrt_big arithm", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for op_idx in 0..columns.len() / Self::num_cols_per_op() { let offset = op_idx * Self::num_cols_per_op(); let inp = meta.query_advice(columns[offset + 0], Rotation::cur()); let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur()); let rem = meta.query_advice(columns[offset + 2], Rotation::cur()); let lhs = inp.clone(); let rhs = sqrt.clone() * sqrt.clone() + rem.clone(); constraints.push(s.clone() * (lhs - rhs)); } constraints }); for op_idx in 0..columns.len() / Self::num_cols_per_op() { let offset = op_idx * Self::num_cols_per_op(); meta.lookup("sqrt_big sqrt lookup", |meta| { let s = meta.query_selector(selector); let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur()); vec![(s.clone() * sqrt, inp_lookup)] }); meta.lookup("sqrt_big rem lookup", |meta|
{ let s = meta.query_selector(selector); let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur()); let rem = meta.query_advice(columns[offset + 2], Rotation::cur()); vec![(s.clone() * (rem + sqrt), inp_lookup)] }); meta.lookup("sqrt_big sqrt - rem lookup", |meta| { let s = meta.query_selector(selector); let sqrt = meta.query_advice(columns[offset + 1], Rotation::cur()); let rem = meta.query_advice(columns[offset + 2], Rotation::cur()); vec![(s.clone() * (two.clone() * sqrt - rem), inp_lookup)] }); } let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::SqrtBig, vec![selector]); GadgetConfig { columns, tables, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for SqrtBigChip<F> { fn name(&self) -> String { "sqrt_big".to_string() } fn num_cols_per_op(&self) -> usize { Self::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.num_inputs_per_row() } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let inps = &vec_inputs[0]; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::SqrtBig).unwrap()[0]; selector.enable(region, row_offset)?; } let mut outp_cells = vec![]; for (i, inp) in inps.iter().enumerate() { let offset = i * self.num_cols_per_op(); inp.copy_advice( || "sqrt_big", region, self.config.columns[offset], row_offset, )?; let outp = inp.value().map(|x: &F| { let inp_val = convert_to_u64(x) as i64; let fsqrt = (inp_val as f64).sqrt(); let sqrt = fsqrt.round() as i64; let
rem = inp_val - sqrt * sqrt; (sqrt, rem) }); let sqrt_cell = region.assign_advice( || "sqrt_big", self.config.columns[offset + 1], row_offset, || outp.map(|x| F::from(x.0 as u64)), )?; let _rem_cell = region.assign_advice( || "sqrt_big", self.config.columns[offset + 2], row_offset, || { outp.map(|x| { let rem_pos = x.1 + x.0; F::from(rem_pos as u64) - F::from(x.0 as u64) }) }, )?; outp_cells.push(sqrt_cell); } Ok(outp_cells) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let mut inp = vec_inputs[0].clone(); let inp_len = inp.len(); while inp.len() % self.num_inputs_per_row() != 0 { inp.push(zero); } let vec_inputs = vec![inp]; let outp = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, single_inputs, )?; Ok(outp[0..inp_len].to_vec()) } }
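// sqrt_big checks inp == sqrt^2 + rem for each op, and the three lookups require sqrt,
// rem + sqrt, and 2*sqrt - rem to be valid table entries, confining rem to roughly
// [-sqrt, 2*sqrt] around the rounded integer square root.
//
// Plain-integer sketch of the witness computed above:
#[allow(dead_code)]
fn sqrt_big_witness(inp: u64) -> (i64, i64) {
    // sqrt = round(sqrt(inp)), rem = inp - sqrt^2
    let sqrt = (inp as f64).sqrt().round() as i64;
    (sqrt, inp as i64 - sqrt * sqrt)
}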
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; pub struct SquareGadgetChip<F: PrimeField> { config: Rc<GadgetConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> SquareGadgetChip<F> { pub fn construct(config: Rc<GadgetConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = gadget_config.columns; meta.create_gate("square gate", |meta| { let s = meta.query_selector(selector); let gate_inp = meta.query_advice(columns[0], Rotation::cur()); let gate_output = meta.query_advice(columns[1], Rotation::cur()); let res = gate_inp.clone() * gate_inp; vec![s * (res - gate_output)] }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::Square, vec![selector]); GadgetConfig { columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for SquareGadgetChip<F> { fn name(&self) -> String { "SquareChip".to_string() } fn num_cols_per_op(&self) -> usize { 2 } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.num_inputs_per_row() } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { assert_eq!(vec_inputs.len(), 1); if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::Square).unwrap()[0]; selector.enable(region, row_offset)?; } let inps = &vec_inputs[0]; let mut outp = vec![]; for (i
, inp) in inps.iter().enumerate() { let offset = i * self.num_cols_per_op(); inp.copy_advice(|| "", region, self.config.columns[offset], row_offset)?; let outp_val = inp.value().map(|x: &F| x.to_owned() * x.to_owned()); let outp_cell = region.assign_advice( || "square output", self.config.columns[offset + 1], row_offset, || outp_val, )?; outp.push(outp_cell); } Ok(outp) } fn forward( &self, mut layouter: impl halo2_proofs::circuit::Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let mut inp = vec_inputs[0].clone(); let initial_len = inp.len(); while inp.len() % self.num_inputs_per_row() != 0 { inp.push(zero); } let vec_inputs = vec![inp]; let res = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, single_inputs, )?; Ok(res[0..initial_len].to_vec()) } }
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type SquaredDiffConfig = GadgetConfig; pub struct SquaredDiffGadgetChip<F: PrimeField> { config: Rc<SquaredDiffConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> SquaredDiffGadgetChip<F> { pub fn construct(config: Rc<SquaredDiffConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn num_cols_per_op() -> usize { 3 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = gadget_config.columns; meta.create_gate("squared diff", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for i in 0..columns.len() / Self::num_cols_per_op() { let offset = i * Self::num_cols_per_op(); let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur()); let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); let res = (inp1 - inp2).square(); constraints.append(&mut vec![s.clone() * (res - outp)]) } constraints }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::SquaredDiff, vec![selector]); GadgetConfig { columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for SquaredDiffGadgetChip<F> { fn name(&self) -> String { "SquaredDiff".to_string() } fn num_cols_per_op(&self) -> usize { Self::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn op_row_region( &self, regi
on: &mut Region<F>, row_offset: usize, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let inp1 = &vec_inputs[0]; let inp2 = &vec_inputs[1]; assert_eq!(inp1.len(), inp2.len()); let columns = &self.config.columns; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::SquaredDiff).unwrap()[0]; selector.enable(region, row_offset)?; } let mut outps = vec![]; for i in 0..inp1.len() { let offset = i * self.num_cols_per_op(); let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?; let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?; let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned()); let outp = outp * outp; let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?; outps.push(outp); } Ok(outps) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let mut inp1 = vec_inputs[0].clone(); let mut inp2 = vec_inputs[1].clone(); let initial_len = inp1.len(); while inp1.len() % self.num_inputs_per_row() != 0 { inp1.push(zero); inp2.push(zero); } let vec_inputs = vec![inp1, inp2]; let res = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, single_inputs, )?; Ok(res[0..initial_len].to_vec()) } }
use std::{marker::PhantomData, rc::Rc}; use halo2_proofs::{ circuit::{AssignedCell, Layouter, Region}, halo2curves::ff::PrimeField, plonk::{ConstraintSystem, Error}, poly::Rotation, }; use super::gadget::{Gadget, GadgetConfig, GadgetType}; type SubPairsConfig = GadgetConfig; pub struct SubPairsChip<F: PrimeField> { config: Rc<SubPairsConfig>, _marker: PhantomData<F>, } impl<F: PrimeField> SubPairsChip<F> { pub fn construct(config: Rc<SubPairsConfig>) -> Self { Self { config, _marker: PhantomData, } } pub fn num_cols_per_op() -> usize { 3 } pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig { let selector = meta.selector(); let columns = gadget_config.columns; meta.create_gate("sub pair", |meta| { let s = meta.query_selector(selector); let mut constraints = vec![]; for i in 0..columns.len() / Self::num_cols_per_op() { let offset = i * Self::num_cols_per_op(); let inp1 = meta.query_advice(columns[offset + 0], Rotation::cur()); let inp2 = meta.query_advice(columns[offset + 1], Rotation::cur()); let outp = meta.query_advice(columns[offset + 2], Rotation::cur()); let res = inp1 - inp2; constraints.append(&mut vec![s.clone() * (res - outp)]) } constraints }); let mut selectors = gadget_config.selectors; selectors.insert(GadgetType::SubPairs, vec![selector]); GadgetConfig { columns, selectors, ..gadget_config } } } impl<F: PrimeField> Gadget<F> for SubPairsChip<F> { fn name(&self) -> String { "sub pairs chip".to_string() } fn num_cols_per_op(&self) -> usize { Self::num_cols_per_op() } fn num_inputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn num_outputs_per_row(&self) -> usize { self.config.columns.len() / self.num_cols_per_op() } fn op_row_region( &self, region: &mut Region<F>, row_offset: usize, vec_
inputs: &Vec<Vec<&AssignedCell<F, F>>>, _single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let inp1 = &vec_inputs[0]; let inp2 = &vec_inputs[1]; assert_eq!(inp1.len(), inp2.len()); let columns = &self.config.columns; if self.config.use_selectors { let selector = self.config.selectors.get(&GadgetType::SubPairs).unwrap()[0]; selector.enable(region, row_offset)?; } let mut outps = vec![]; for i in 0..inp1.len() { let offset = i * self.num_cols_per_op(); let inp1 = inp1[i].copy_advice(|| "", region, columns[offset + 0], row_offset)?; let inp2 = inp2[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?; let outp = inp1.value().map(|x: &F| x.to_owned()) - inp2.value().map(|x: &F| x.to_owned()); let outp = region.assign_advice(|| "", columns[offset + 2], row_offset, || outp)?; outps.push(outp); } Ok(outps) } fn forward( &self, mut layouter: impl Layouter<F>, vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>, single_inputs: &Vec<&AssignedCell<F, F>>, ) -> Result<Vec<AssignedCell<F, F>>, Error> { let zero = &single_inputs[0]; let mut inp1 = vec_inputs[0].clone(); let mut inp2 = vec_inputs[1].clone(); let initial_len = inp1.len(); while inp1.len() % self.num_inputs_per_row() != 0 { inp1.push(zero); inp2.push(zero); } let vec_inputs = vec![inp1, inp2]; let res = self.op_aligned_rows( layouter.namespace(|| format!("forward row {}", self.name())), &vec_inputs, single_inputs, )?; Ok(res[0..initial_len].to_vec()) } }
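// MulPairsChip, SquareGadgetChip, SquaredDiffGadgetChip, and SubPairsChip above all
// follow the same elementwise pattern: two or three advice columns per op, a single-row
// gate relating the copied inputs to the output column, and a forward() that pads the
// inputs with the `zero` single input up to a full row before delegating to
// op_aligned_rows() and truncating the result back to the original length.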
use std::marker::PhantomData;

use halo2_proofs::{
  circuit::{AssignedCell, Layouter, Region},
  halo2curves::ff::PrimeField,
  plonk::{ConstraintSystem, Error, Expression},
  poly::Rotation,
};

use crate::gadgets::gadget::{convert_to_u64, GadgetConfig};

use super::gadget::{Gadget, GadgetType};

type UpdateConfig = GadgetConfig;

pub struct UpdateGadgetChip<F: PrimeField> {
  config: UpdateConfig,
  _marker: PhantomData<F>,
}

impl<F: PrimeField> UpdateGadgetChip<F> {
  pub fn construct(config: UpdateConfig) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  pub fn num_cols_per_op() -> usize {
    4
  }

  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> UpdateConfig {
    let tables = &gadget_config.tables;
    let mod_lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    let columns = gadget_config.columns;
    let selector = meta.complex_selector();

    let div_val = gadget_config.scale_factor;
    let eta: u64 = (gadget_config.scale_factor as f64 * gadget_config.eta) as u64;

    meta.create_gate("updater_arith", |meta| {
      let s = meta.query_selector(selector);
      let sf = Expression::Constant(F::from(div_val as u64));
      let eta = Expression::Constant(F::from(eta as u64));

      let mut constraints = vec![];
      for op_idx in 0..columns.len() / Self::num_cols_per_op() {
        let offset = op_idx * Self::num_cols_per_op();
        let w = meta.query_advice(columns[offset], Rotation::cur());
        let dw = meta.query_advice(columns[offset + 1], Rotation::cur());
        let div = meta.query_advice(columns[offset + 2], Rotation::cur());
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());

        // Enforces w * sf - dw * eta == div * sf + mod_res, i.e. div is the
        // updated weight at scale sf and mod_res is the rounding remainder.
        let expr = (w * sf.clone() - dw * eta.clone()) - (div * sf.clone() + mod_res);
        constraints.push(s.clone() * expr);
      }
      constraints
    });

    for op_idx in 0..columns.len() / Self::num_cols_per_op() {
      let offset = op_idx * Self::num_cols_per_op();
      meta.lookup("max inp1", |meta| {
        let s = meta.query_selector(selector);
        let mod_res = meta.query_advice(columns[offset + 3], Rotation::cur());
        vec![(s.clone() * mod_res.clone(), mod_lookup)]
      });
    }

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::Update, vec![selector]);

    UpdateConfig {
      columns,
      selectors,
      ..gadget_config
    }
  }
}

impl<F: PrimeField + Ord> Gadget<F> for UpdateGadgetChip<F> {
  fn name(&self) -> String {
    "updater chip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.config.columns.len() / self.num_cols_per_op()
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    _single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let div_val = self.config.scale_factor as i64;
    let div_val_f = F::from(div_val as u64);
    // Note: the witness computation fixes eta to scale_factor / 1000 (i.e. 0.001);
    // configure() derives the gate constant from gadget_config.eta instead.
    let eta = div_val / 1000;
    let eta = F::from(eta as u64);

    let div_outp_min_val = self.config.div_outp_min_val;
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;
    let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

    let columns = &self.config.columns;

    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::Update).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    let w = &vec_inputs[0];
    let dw = &vec_inputs[1];

    let mut output_cells = vec![];
    for i in 0..w.len() {
      let offset = i * self.num_cols_per_op();
      let _w_cell = w[i].copy_advice(|| "", region, columns[offset], row_offset)?;
      let _dw_cell = dw[i].copy_advice(|| "", region, columns[offset + 1], row_offset)?;

      let w_val = w[i].value().map(|x: &F| x.to_owned());
      let dw_val = dw[i].value().map(|x: &F| x.to_owned());

      let out_scaled = w_val.zip(dw_val).map(|(w, dw)| w * div_val_f - dw * eta);
      let div_mod = out_scaled.map(|x| {
        let x_pos = x + div_inp_min_val_pos;
        let x_pos = if x_pos > F::ZERO { x_pos } else { x_pos + div_val_f };
        let inp = convert_to_u64(&x_pos);
        let div_res = inp as i64 / div_val - (div_inp_min_val_pos_i64 as i64 / div_val);
        let mod_res = inp as i64 % div_val;
        (div_res, mod_res)
      });

      let div_res_cell = region
        .assign_advice(
          || "div_res",
          self.config.columns[offset + 2],
          row_offset,
          || {
            div_mod.map(|(x, _): (i64, i64)| {
              F::from((x - div_outp_min_val as i64) as u64) - F::from(-div_outp_min_val as u64)
            })
          },
        )
        .unwrap();
      let _mod_res_cell = region
        .assign_advice(
          || "mod_res",
          self.config.columns[offset + 3],
          row_offset,
          || div_mod.map(|(_, x): (i64, i64)| F::from(x as u64)),
        )
        .unwrap();

      output_cells.push(div_res_cell);
    }

    Ok(output_cells)
  }

  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let zero = &single_inputs[0];

    let mut w = vec_inputs[0].clone();
    let mut dw = vec_inputs[1].clone();
    let initial_len = w.len();

    // Pad w and dw with zeros until they fill a whole number of ops.
    while w.len() % self.num_cols_per_op() != 0 {
      w.push(zero);
    }
    while dw.len() % self.num_cols_per_op() != 0 {
      dw.push(zero);
    }

    let res = self.op_aligned_rows(
      layouter.namespace(|| format!("forward row {}", self.name())),
      &vec![w, dw],
      single_inputs,
    )?;
    Ok(res[0..initial_len].to_vec())
  }
}
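The following is a standalone sketch (not part of the circuit) of the plain-integer arithmetic the updater_arith gate enforces: the new weight is (w * sf - dw * eta_scaled) decomposed as div * sf + mod. The concrete values and the eta = 0.001 fixed-point assumption mirror the scale_factor / 1000 used in op_row_region above and are illustrative only.

// Illustrative only: fixed-point SGD update witness, outside the circuit.
fn update_witness(w: i64, dw: i64, scale_factor: i64) -> (i64, i64) {
  let eta_scaled = scale_factor / 1000; // assumes eta = 0.001 in fixed point
  let num = w * scale_factor - dw * eta_scaled;
  // Decompose so that w*sf - dw*eta == div*sf + mod with 0 <= mod < sf.
  (num.div_euclid(scale_factor), num.rem_euclid(scale_factor))
}

fn main() {
  // w = 1.5, dw = 2.0 at scale factor 1024: updated weight is about 1.498.
  let (new_w, rem) = update_witness(1536, 2048, 1024);
  assert_eq!(new_w * 1024 + rem, 1536 * 1024 - 2048 * (1024 / 1000));
  println!("updated weight (fixed point): {new_w}, remainder: {rem}");
}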
use std::{marker::PhantomData, rc::Rc};

use halo2_proofs::{
  circuit::{AssignedCell, Layouter, Region},
  halo2curves::ff::PrimeField,
  plonk::{ConstraintSystem, Error, Expression},
  poly::Rotation,
};
use rounded_div::RoundedDiv;

use super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};

type VarDivRoundConfig = GadgetConfig;

pub struct VarDivRoundChip<F: PrimeField> {
  config: Rc<VarDivRoundConfig>,
  _marker: PhantomData<F>,
}

impl<F: PrimeField> VarDivRoundChip<F> {
  pub fn construct(config: Rc<VarDivRoundConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  pub fn num_cols_per_op() -> usize {
    3
  }

  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let columns = gadget_config.columns;
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));

    let tables = gadget_config.tables;
    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    meta.create_gate("var_div_arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];

      // The divisor b is shared by every op in the row and lives in the last column.
      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let a = meta.query_advice(columns[offset], Rotation::cur());
        let c = meta.query_advice(columns[offset + 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        // 2a + b == 2bc + r enforces c = round(a / b); the lookups below
        // range-check r, 2b - r, and b.
        let lhs = a.clone() * two.clone() + b.clone();
        let rhs = b.clone() * two.clone() * c + r;
        constraints.push(s.clone() * (lhs - rhs));
      }
      constraints
    });

    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
      let offset = i * Self::num_cols_per_op();
      meta.lookup("var div range checks r", |meta| {
        let s = meta.query_selector(selector);
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());
        vec![(s.clone() * r, lookup)]
      });
      meta.lookup("var div range checks 2b-r", |meta| {
        let s = meta.query_selector(selector);
        let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());
        vec![(s.clone() * (two.clone() * b - r), lookup)]
      });
    }
    meta.lookup("var div range checks b", |meta| {
      let s = meta.query_selector(selector);
      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      vec![(s.clone() * b, lookup)]
    });

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::VarDivRound, vec![selector]);

    GadgetConfig {
      columns,
      tables,
      selectors,
      ..gadget_config
    }
  }
}

impl<F: PrimeField> Gadget<F> for VarDivRoundChip<F> {
  fn name(&self) -> String {
    "VarDivRoundChip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    (self.config.columns.len() - 1) / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row()
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let a_vec = &vec_inputs[0];
    let b = &single_inputs[1];

    let div_outp_min_val_i64 = self.config.div_outp_min_val;
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;

    if self.config.use_selectors {
      let selector = self.config.selectors.get(&GadgetType::VarDivRound).unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    b.copy_advice(
      || "",
      region,
      self.config.columns[self.config.columns.len() - 1],
      row_offset,
    )?;

    let mut div_out = vec![];
    for (i, a) in a_vec.iter().enumerate() {
      let offset = i * self.num_cols_per_op();
      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)?;

      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
        let b = convert_to_u128(b);
        // Round the positive shift down to a multiple of b so it divides out exactly.
        let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);
        let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

        let a_pos = *a + div_inp_min_val_pos;
        let a = convert_to_u128(&a_pos);

        let c_pos = a.rounded_div(b);
        let c = (c_pos as i128 - (div_inp_min_val_pos_i64 as u128 / b) as i128) as i64;

        let rem_floor = (a as i128) - (c_pos * b) as i128;
        let r = 2 * rem_floor + (b as i128);
        let r = r as i64;
        (c, r)
      });

      let div_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 1],
        row_offset,
        || {
          div_mod.map(|(c, _)| {
            let offset = F::from(-div_outp_min_val_i64 as u64);
            let c = F::from((c - div_outp_min_val_i64) as u64);
            c - offset
          })
        },
      )?;
      let _mod_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 2],
        row_offset,
        || div_mod.map(|(_, r)| F::from(r as u64)),
      )?;

      div_out.push(div_cell);
    }

    Ok(div_out)
  }

  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let initial_len = inps.len();

    let default = &single_inputs[0];
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
    }

    let res = self.op_aligned_rows(layouter.namespace(|| "var_div"), &vec![inps], single_inputs)?;
    Ok(res[..initial_len].to_vec())
  }
}
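The next snippet is a standalone sketch (not circuit code) of the identity the var_div_arithm gate checks: for c = round(a / b) there exists a remainder r with 2a + b = 2bc + r and 0 <= r < 2b, which is exactly what the gate plus the range checks on r and 2b - r pin down. The helper name and test values are illustrative; the chip itself relies on the rounded_div crate.

// Illustrative only: witness generation for variable rounded division.
fn rounded_div_witness(a: u64, b: u64) -> (u64, u64) {
  let c = (a + b / 2) / b; // round-half-up division
  let r = 2 * a + b - 2 * b * c; // remainder in [0, 2b)
  (c, r)
}

fn main() {
  for (a, b) in [(7u64, 2u64), (10, 4), (9, 4), (1000, 3)] {
    let (c, r) = rounded_div_witness(a, b);
    assert_eq!(2 * a + b, 2 * b * c + r); // the gate's equality
    assert!(r < 2 * b); // guaranteed by the r and 2b - r lookups
    println!("round({a}/{b}) = {c}, r = {r}");
  }
}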
use std::{marker::PhantomData, rc::Rc};

use halo2_proofs::{
  circuit::{AssignedCell, Layouter, Region},
  halo2curves::ff::PrimeField,
  plonk::{ConstraintSystem, Error, Expression},
  poly::Rotation,
};
use rounded_div::RoundedDiv;

use super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};

pub struct VarDivRoundBigChip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}

impl<F: PrimeField> VarDivRoundBigChip<F> {
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  pub fn num_cols_per_op() -> usize {
    7
  }

  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let columns = gadget_config.columns;
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));
    let range = Expression::Constant(F::from(gadget_config.num_rows as u64));

    let tables = gadget_config.tables;
    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    meta.create_gate("var_div_arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];

      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let a = meta.query_advice(columns[offset], Rotation::cur());
        let c = meta.query_advice(columns[offset + 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        let lhs = a.clone() * two.clone() + b.clone();
        let rhs = b.clone() * two.clone() * c + r.clone();
        constraints.push(s.clone() * (lhs - rhs));

        let br1 = meta.query_advice(columns[offset + 3], Rotation::cur());
        let br0 = meta.query_advice(columns[offset + 4], Rotation::cur());
        let lhs = b.clone() * two.clone() - r.clone();
        let rhs = br1 * range.clone() + br0;
        constraints.push(s.clone() * (lhs - rhs));

        let r1 = meta.query_advice(columns[offset + 5], Rotation::cur());
        let r0 = meta.query_advice(columns[offset + 6], Rotation::cur());
        let lhs = r.clone();
        let rhs = r1 * range.clone() + r0;
        constraints.push(s.clone() * (lhs - rhs));
      }
      constraints
    });

    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
      let offset = i * Self::num_cols_per_op();
      meta.lookup("var div big br1", |meta| {
        let s = meta.query_selector(selector);
        let br1 = meta.query_advice(columns[offset + 3], Rotation::cur());
        vec![(s * br1, lookup)]
      });
      meta.lookup("var div big br0", |meta| {
        let s = meta.query_selector(selector);
        let br0 = meta.query_advice(columns[offset + 4], Rotation::cur());
        vec![(s * br0, lookup)]
      });
      meta.lookup("var div big r1", |meta| {
        let s = meta.query_selector(selector);
        let r1 = meta.query_advice(columns[offset + 5], Rotation::cur());
        vec![(s * r1, lookup)]
      });
      meta.lookup("var div big r0", |meta| {
        let s = meta.query_selector(selector);
        let r0 = meta.query_advice(columns[offset + 6], Rotation::cur());
        vec![(s * r0, lookup)]
      });
    }

    let mut selectors = gadget_config.selectors;
    selectors.insert(GadgetType::VarDivRoundBig, vec![selector]);

    GadgetConfig {
      columns,
      tables,
      selectors,
      ..gadget_config
    }
  }
}

impl<F: PrimeField> Gadget<F> for VarDivRoundBigChip<F> {
  fn name(&self) -> String {
    "VarDivBigRoundChip".to_string()
  }

  fn num_cols_per_op(&self) -> usize {
    Self::num_cols_per_op()
  }

  fn num_inputs_per_row(&self) -> usize {
    (self.config.columns.len() - 1) / self.num_cols_per_op()
  }

  fn num_outputs_per_row(&self) -> usize {
    self.num_inputs_per_row()
  }

  fn op_row_region(
    &self,
    region: &mut Region<F>,
    row_offset: usize,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let a_vec = &vec_inputs[0];
    let b = &single_inputs[1];

    let div_outp_min_val_i64 = self.config.div_outp_min_val;
    let div_inp_min_val_pos_i64 = -self.config.shift_min_val;
    let num_rows = self.config.num_rows as i64;

    if self.config.use_selectors {
      let selector = self
        .config
        .selectors
        .get(&GadgetType::VarDivRoundBig)
        .unwrap()[0];
      selector.enable(region, row_offset)?;
    }

    b.copy_advice(
      || "",
      region,
      self.config.columns[self.config.columns.len() - 1],
      row_offset,
    )?;

    let mut div_out = vec![];
    for (i, a) in a_vec.iter().enumerate() {
      let offset = i * self.num_cols_per_op();
      a.copy_advice(|| "", region, self.config.columns[offset], row_offset)
        .unwrap();

      let div_mod = a.value().zip(b.value()).map(|(a, b)| {
        let b = convert_to_u128(b);
        let div_inp_min_val_pos_i64 = div_inp_min_val_pos_i64 / (b as i64) * (b as i64);
        let div_inp_min_val_pos = F::from(div_inp_min_val_pos_i64 as u64);

        let a_pos = *a + div_inp_min_val_pos;
        let a = convert_to_u128(&a_pos);

        let c_pos = a.rounded_div(b);
        let c = (c_pos as i128 - (div_inp_min_val_pos_i64 as u128 / b) as i128) as i64;

        let rem_floor = (a as i128) - (c_pos * b) as i128;
        let r = 2 * rem_floor + (b as i128);
        let r = r as i64;
        (c, r)
      });

      let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {
        let b = convert_to_u128(b) as i64;
        let val = 2 * b - r;
        let p1 = val / num_rows;
        let p0 = val % num_rows;
        (p1, p0)
      });
      let r_split = div_mod.map(|(_, r)| {
        let p1 = r / num_rows;
        let p0 = r % num_rows;
        (p1, p0)
      });

      let div_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 1],
        row_offset,
        || {
          div_mod.map(|(c, _)| {
            let offset = F::from(-div_outp_min_val_i64 as u64);
            let c = F::from((c - div_outp_min_val_i64) as u64);
            c - offset
          })
        },
      )?;
      let _mod_cell = region.assign_advice(
        || "",
        self.config.columns[offset + 2],
        row_offset,
        || div_mod.map(|(_, r)| F::from(r as u64)),
      )?;
      let _br_split_cell_1 = region.assign_advice(
        || "",
        self.config.columns[offset + 3],
        row_offset,
        || br_split.map(|(p1, _)| F::from(p1 as u64)),
      )?;
      let _br_split_cell_2 = region.assign_advice(
        || "",
        self.config.columns[offset + 4],
        row_offset,
        || br_split.map(|(_, p0)| F::from(p0 as u64)),
      )?;
      let _r_split_cell_1 = region.assign_advice(
        || "",
        self.config.columns[offset + 5],
        row_offset,
        || r_split.map(|(p1, _)| F::from(p1 as u64)),
      )?;
      let _r_split_cell_2 = region.assign_advice(
        || "",
        self.config.columns[offset + 6],
        row_offset,
        || r_split.map(|(_, p0)| F::from(p0 as u64)),
      )?;

      div_out.push(div_cell);
    }

    Ok(div_out)
  }

  fn forward(
    &self,
    mut layouter: impl Layouter<F>,
    vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
    single_inputs: &Vec<&AssignedCell<F, F>>,
  ) -> Result<Vec<AssignedCell<F, F>>, Error> {
    let mut inps = vec_inputs[0].clone();
    let initial_len = inps.len();

    let default = &single_inputs[0];
    while inps.len() % self.num_inputs_per_row() != 0 {
      inps.push(&default);
    }

    let res = self.op_aligned_rows(
      layouter.namespace(|| "var_div_big"),
      &vec![inps],
      single_inputs,
    )?;
    Ok(res[..initial_len].to_vec())
  }
}
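The following standalone sketch (not circuit code) illustrates why VarDivRoundBigChip splits r and 2b - r into two base-num_rows limbs: each limb stays below num_rows, so it fits the single InputLookup table even when the original value does not. The num_rows and r values below are purely illustrative.

// Illustrative only: two-limb decomposition used for the wide range checks.
fn split_base(x: u64, num_rows: u64) -> (u64, u64) {
  (x / num_rows, x % num_rows) // x = hi * num_rows + lo, 0 <= lo < num_rows
}

fn main() {
  let num_rows = 1u64 << 16;
  let r = 3_000_000_000u64; // too large for a 2^16-entry lookup on its own
  let (hi, lo) = split_base(r, num_rows);
  assert_eq!(hi * num_rows + lo, r);
  assert!(hi < num_rows && lo < num_rows); // both limbs fit the lookup table
  println!("r = {hi} * {num_rows} + {lo}");
}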
use std::{marker::PhantomData, rc::Rc};

use halo2_proofs::{
  circuit::{AssignedCell, Layouter, Region},
  halo2curves::ff::PrimeField,
  plonk::{ConstraintSystem, Error, Expression},
  poly::Rotation,
};
use rounded_div::RoundedDiv;

use super::gadget::{convert_to_u128, Gadget, GadgetConfig, GadgetType};

pub struct VarDivRoundBig3Chip<F: PrimeField> {
  config: Rc<GadgetConfig>,
  _marker: PhantomData<F>,
}

impl<F: PrimeField> VarDivRoundBig3Chip<F> {
  pub fn construct(config: Rc<GadgetConfig>) -> Self {
    Self {
      config,
      _marker: PhantomData,
    }
  }

  pub fn num_cols_per_op() -> usize {
    9
  }

  pub fn configure(meta: &mut ConstraintSystem<F>, gadget_config: GadgetConfig) -> GadgetConfig {
    let columns = gadget_config.columns;
    let selector = meta.complex_selector();
    let two = Expression::Constant(F::from(2));
    let range = Expression::Constant(F::from(gadget_config.num_rows as u64));
    let range_sq = range.clone() * range.clone();

    let tables = gadget_config.tables;
    let lookup = tables.get(&GadgetType::InputLookup).unwrap()[0];

    meta.create_gate("var_div_big3_arithm", |meta| {
      let s = meta.query_selector(selector);
      let mut constraints = vec![];

      let b = meta.query_advice(columns[columns.len() - 1], Rotation::cur());
      for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
        let offset = i * Self::num_cols_per_op();
        let a = meta.query_advice(columns[offset], Rotation::cur());
        let c = meta.query_advice(columns[offset + 1], Rotation::cur());
        let r = meta.query_advice(columns[offset + 2], Rotation::cur());

        let lhs = a.clone() * two.clone() + b.clone();
        let rhs = b.clone() * two.clone() * c + r.clone();
        constraints.push(s.clone() * (lhs - rhs));

        let br2 = meta.query_advice(columns[offset + 3], Rotation::cur());
        let br1 = meta.query_advice(columns[offset + 4], Rotation::cur());
        let br0 = meta.query_advice(columns[offset + 5], Rotation::cur());
        let lhs = b.clone() * two.clone() - r.clone();
        let rhs = br2 * range_sq.clone() + br1 * range.clone() + br0;
        constraints.push(s.clone() * (lhs - rhs));

        let r2 = meta.query_advice(columns[offset + 6], Rotation::cur());
        let r1 = meta.query_advice(columns[offset + 7], Rotation::cur());
        let r0 = meta.query_advice(columns[offset + 8], Rotation::cur());
        let lhs = r.clone();
        let rhs = r2 * range_sq.clone() + r1 * range.clone() + r0;
        constraints.push(s.clone() * (lhs - rhs));
      }
      constraints
    });

    for i in 0..(columns.len() - 1) / Self::num_cols_per_op() {
      let offset = i * Self::num_cols_per_op();
      meta.lookup("var div big br2", |meta| {
        let s = meta.query_selector(selector);
        let br2 = meta.query_advice(columns[offset + 3], Rotation::cur());
        vec![(s * br2, lookup)]
      });
      meta.lookup("var div big br1", |meta| {
        let s = meta.query_selector(selector);
        let br1 = meta.query_advice(columns[offset + 4], Rotation::cur());
        vec![(s * br1, lookup)]
      });
      meta.lookup("var div big br0", |meta| {
        let s = meta.query_selector(selector);
        let br0 = meta.query_advice(columns[offset + 5], Rotation::cur());
        vec![(s * br0, lookup)]
      });
      meta.lookup("var div big r2", |meta| {
        let s = meta.query_selector(selector);
        let r2 = meta.query_advice(columns[offset + 6], Rotation::cur());
        vec![(s * r2, lookup)]
      });
      meta.lookup("var div big r1", |meta| {
        let s = meta.query_selector(selector);
        let r1 = meta.query_advice(columns[offset + 7], Rotation::cur());
        vec![(s * r1, lookup)]
      });
      meta.lookup("var div big r0", |meta| {
        let s = meta.query_selector(selector);
        let r0 = meta.query_advice(columns[offset + 8], Rotation::cur());
        vec![(s * r0, lookup)]
      });
    }

    let mut selectors = gadget_config.selectors;