Commit e2fd8a5

Merge pull request #1113 from fastmachinelearning/onnx_parser_max_precision
add max_precision to onnx parser
2 parents: 7612b4f + bd28050

File tree

1 file changed: +9 -2 lines

hls4ml/utils/config.py

Lines changed: 9 additions & 2 deletions
@@ -413,7 +413,7 @@ def make_layer_config(layer):


 def config_from_onnx_model(
-    model, granularity='name', backend=None, default_precision='ap_fixed<16,6>', default_reuse_factor=1
+    model, granularity='name', backend=None, default_precision='fixed<16,6>', default_reuse_factor=1, max_precision=None
 ):
     """Create an HLS conversion config given the ONNX model.

@@ -435,6 +435,8 @@ def config_from_onnx_model(
         backend(str, optional): Name of the backend to use
         default_precision (str, optional): Default precision to use. Defaults to 'fixed<16,6>'.
         default_reuse_factor (int, optional): Default reuse factor. Defaults to 1.
+        max_precision (str or None, optional): Maximum width precision to use. Defaults to None, meaning no maximum.
+            Note: Only integer and fixed precisions are supported

     Raises:
         Exception: If ONNX model has layers not supported by hls4ml.
@@ -456,9 +458,14 @@ def config_from_onnx_model(
     config = {}

     model_config = {}
-    model_config['Precision'] = default_precision
+    model_config['Precision'] = {}
+    model_config['Precision']['default'] = default_precision
+    if max_precision is not None:
+        model_config['Precision']['maximum'] = max_precision
     model_config['ReuseFactor'] = default_reuse_factor
     model_config['Strategy'] = 'Latency'
+    model_config['BramFactor'] = 1_000_000_000
+    model_config['TraceOutput'] = False

     config['Model'] = model_config