Merged
Changes from 56 commits
Commits
67 commits
ef412d9
Add Singularity support to existing run script
fpjentzsch Aug 1, 2023
8adee81
Add GHA to build & test singularity container
fpjentzsch Aug 3, 2023
39e88cd
[Singularity] Add documentation
fpjentzsch Aug 3, 2023
7d7d061
[Singularity] Fixes for GHA
fpjentzsch Aug 3, 2023
ed1a325
[Singularity] Fixes for GHA
fpjentzsch Aug 3, 2023
5aa85d0
[Singularity] Increase build space for GHA
fpjentzsch Aug 4, 2023
71259c5
[Singularity] Adjust build space for GHA
fpjentzsch Aug 4, 2023
3ed04b8
[Singularity] GHA fixes
fpjentzsch Aug 4, 2023
8f263bc
[Singularity] Adjust GHA
fpjentzsch Aug 4, 2023
be61c31
[Singularity] Adjust GHA
fpjentzsch Aug 4, 2023
a61ac9b
[Singularity] Adjust GHA
fpjentzsch Aug 4, 2023
c80c03c
Merge remote-tracking branch 'upstream/dev' into feature/apptainer
fpjentzsch Aug 6, 2023
cbf9f48
Skeleton for C driver generation
bwintermann Aug 15, 2023
f0bdea2
Creating templates for C and C++ drivers
bwintermann Aug 15, 2023
634ef9f
C++ kernel wip
bwintermann Aug 15, 2023
24a8066
Fixes for C++ driver
bwintermann Aug 15, 2023
201011a
Fixes for driver creation
bwintermann Aug 16, 2023
60640dd
C++ driver buffers and memory initialization
bwintermann Aug 16, 2023
c0b390c
Compilation fix
bwintermann Aug 16, 2023
89e3ff3
Inclusion of cpp drivers per submodule. Fixes for MakeCPPDriver
bwintermann Aug 17, 2023
820e8f8
Fixes for c++ driver generation
bwintermann Aug 17, 2023
ea554b5
Fixed driver export
bwintermann Aug 18, 2023
c5225e1
Temporary debugging print statements
bwintermann Sep 6, 2023
50b04d2
Updated to write out header and config files for updated cpp driver
bwintermann Oct 4, 2023
2145749
Updated finn-cpp-driver submodule to track dev branch
bwintermann Oct 4, 2023
b5aaa2d
Some path fixing
bwintermann Oct 6, 2023
cacbd55
Transfer run-docker update for singularity from PR
bwintermann Oct 17, 2023
7c39ad5
Fixed path errors during cpp driver step
bwintermann Oct 18, 2023
9b521f5
Fixed pathing issues and type name conversion
bwintermann Oct 19, 2023
1e2aea6
Datatype parser fix
bwintermann Oct 19, 2023
da51f97
Add support for U55C to FINN
LinusJungemann Oct 30, 2023
dcc0f14
Update gitignore
LinusJungemann Oct 30, 2023
b179136
Merge branch 'singularity_support' into dev
LinusJungemann Oct 30, 2023
40f9495
Periodic merge of Xilinx FINN into Eki FINN
LinusJungemann Dec 15, 2023
f4552e8
Change HBM bank allocation to use different banks for input and output
LinusJungemann Jan 16, 2024
e1303d2
Merge dev into c_driver to get c_driver up to newest version
LinusJungemann Jan 16, 2024
9e65299
Update finn integration of C++ driver
LinusJungemann Feb 1, 2024
1428ca9
Add C++ driver integration to FINN
LinusJungemann Feb 21, 2024
cd2e82f
Remove some merge artifacts#
LinusJungemann Feb 21, 2024
8bf469e
Update documentation
LinusJungemann Feb 21, 2024
fa7cef9
Remove debug printing
LinusJungemann Feb 21, 2024
7c5b3fd
Merge branch 'dev' into feature/integrateCppDriver
LinusJungemann Jun 26, 2024
6dbe116
Move high performance driver version to v1.1
LinusJungemann Jun 26, 2024
46cceaa
Merge remote-tracking branch 'upstream/dev' into feature/integrateCpp…
auphelia Feb 21, 2025
faba00a
Unify Python and C++ driver generation
LinusJungemann Mar 14, 2025
ac6e2ed
Update repo checkout and dependency building
LinusJungemann Mar 14, 2025
8b4c26d
Remove C++ driver as a submodule, because it is now cloned during run…
LinusJungemann Mar 14, 2025
113b8ea
Remove finn-plus code that is unneccesary for finn
LinusJungemann Mar 28, 2025
9c9b195
Fix small issues
LinusJungemann Apr 7, 2025
8ed6263
Fix output to shell
LinusJungemann Apr 9, 2025
7e1d6ce
Fix linting
LinusJungemann Apr 9, 2025
c709777
Fix linting
LinusJungemann Apr 9, 2025
75d5b77
Merge branch 'feature/integrateCppDriver' of github.com:LinusJungeman…
LinusJungemann Apr 9, 2025
5b63111
Revert changes to notebooks
LinusJungemann Apr 9, 2025
e2a217f
Merge remote-tracking branch 'origin/dev' into feature/integrateCppDr…
LinusJungemann Apr 9, 2025
2c2a33a
Remove replicated code
LinusJungemann Apr 9, 2025
aec60db
Add missing import
LinusJungemann Apr 17, 2025
808798a
Add license
LinusJungemann Apr 17, 2025
a3a3d38
Merge remote-tracking branch 'upstream/dev' into feature/integrateCpp…
auphelia May 21, 2025
030feeb
Remove unsused gitmodules files
auphelia May 21, 2025
d06d17e
[MakeCPPDriver] Small changes to input arguments and model metadata s…
auphelia May 21, 2025
d131907
[Deps] Remove obsolete ending from gitignore
auphelia May 26, 2025
398be02
[Transform] Update copyright header
auphelia May 26, 2025
369f003
[Driver] Move util functions
auphelia May 26, 2025
3949169
[MakeDriver] Add docstring and check for platform
auphelia May 26, 2025
ced9f82
[NBs] Change last occurrences of pynq driver import
auphelia May 26, 2025
52e230c
[Docs] Change make_pynq_driver to make_driver
auphelia May 26, 2025
1 change: 1 addition & 0 deletions .gitignore
@@ -40,6 +40,7 @@ __pycache__/*
.cache/*
.*.swp
*.ipynb_checkpoints*
*.sif

# Project files
.vscode
Empty file added .gitmodules
Empty file.
8 changes: 7 additions & 1 deletion src/finn/builder/build_dataflow_config.py
@@ -62,6 +62,7 @@ class DataflowOutputType(str, Enum):
RTLSIM_PERFORMANCE = "rtlsim_performance"
BITFILE = "bitfile"
PYNQ_DRIVER = "pynq_driver"
CPP_DRIVER = "cpp_driver"
DEPLOYMENT_PACKAGE = "deployment_package"


@@ -123,7 +124,7 @@ class VerificationStepType(str, Enum):
"step_measure_rtlsim_performance",
"step_out_of_context_synthesis",
"step_synthesize_bitfile",
"step_make_pynq_driver",
"step_make_driver",
"step_deployment_package",
]

@@ -356,6 +357,11 @@ class DataflowBuildConfig:
#: rtlsim, otherwise they will be replaced by RTL implementations.
rtlsim_use_vivado_comps: Optional[bool] = True

#: Version of the C++ driver to use when the C++ driver is generated instead of
#: the PYNQ driver. If set to "latest", the newest version will be used; if set
#: to a commit hash, that specific version will be used.
cpp_driver_version: Optional[str] = "latest"

def _resolve_hls_clk_period(self):
if self.hls_clk_period_ns is None:
# use same clk for synth and hls if not explicitly specified
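With these additions, driver selection is controlled through generate_outputs together with the new cpp_driver_version field. The following is a minimal sketch of a build configuration that requests the C++ driver; output_dir, synth_clk_period_ns and board are standard DataflowBuildConfig fields assumed to behave as in upstream FINN, and the board name and output directory are placeholders.

from finn.builder.build_dataflow_config import DataflowBuildConfig, DataflowOutputType

# Sketch only: request the C++ driver instead of the PYNQ Python driver.
cfg = DataflowBuildConfig(
    output_dir="output_example",          # placeholder output directory
    synth_clk_period_ns=10.0,
    board="U55C",                         # placeholder board name
    generate_outputs=[
        DataflowOutputType.BITFILE,
        DataflowOutputType.CPP_DRIVER,    # selects the C++ driver path in step_make_driver
    ],
    cpp_driver_version="latest",          # or a specific commit hash of the C++ driver repo
)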
30 changes: 24 additions & 6 deletions src/finn/builder/build_dataflow_steps.py
@@ -87,7 +87,7 @@
from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
from finn.transformation.fpgadataflow.insert_dwc import InsertDWC
from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO
from finn.transformation.fpgadataflow.make_pynq_driver import MakePYNQDriver
from finn.transformation.fpgadataflow.make_driver import MakeCPPDriver, MakePYNQDriver
from finn.transformation.fpgadataflow.make_zynq_proj import ZynqBuild
from finn.transformation.fpgadataflow.minimize_accumulator_width import (
MinimizeAccumulatorWidth,
@@ -732,15 +732,33 @@ def step_measure_rtlsim_performance(model: ModelWrapper, cfg: DataflowBuildConfi
return model


def step_make_pynq_driver(model: ModelWrapper, cfg: DataflowBuildConfig):
"""Create a PYNQ Python driver that can be used to interface the generated
accelerator."""
def step_make_driver(model: ModelWrapper, cfg: DataflowBuildConfig):
"""Create a driver that can be used to interface the generated accelerator.
Use DataflowBuildConfig to select PYNQ Python or C++ driver."""

driver_dir = os.path.join(cfg.output_dir, "driver")
if DataflowOutputType.PYNQ_DRIVER in cfg.generate_outputs:
driver_dir = cfg.output_dir + "/driver"
# generate PYNQ driver
model = model.transform(MakePYNQDriver(cfg._resolve_driver_platform()))
shutil.copytree(model.get_metadata_prop("pynq_driver_dir"), driver_dir, dirs_exist_ok=True)
print("PYNQ Python driver written into " + driver_dir)
elif DataflowOutputType.CPP_DRIVER in cfg.generate_outputs:
# generate C++ Driver

model = model.transform(
MakeCPPDriver(
cfg._resolve_driver_platform(),
build_dir=cfg.output_dir,
version=cfg.cpp_driver_version,
driver_dir=driver_dir,
)
)
print("C++ driver written into " + driver_dir)
else:
warnings.warn(
"The step step_make_driver is in the build list but will not be executed"
+ " since no driver is selected in generate_outputs in your build.py file!"
)
return model


@@ -862,7 +880,7 @@ def step_deployment_package(model: ModelWrapper, cfg: DataflowBuildConfig):
"step_set_fifo_depths": step_set_fifo_depths,
"step_create_stitched_ip": step_create_stitched_ip,
"step_measure_rtlsim_performance": step_measure_rtlsim_performance,
"step_make_pynq_driver": step_make_pynq_driver,
"step_make_driver": step_make_driver,
"step_out_of_context_synthesis": step_out_of_context_synthesis,
"step_synthesize_bitfile": step_synthesize_bitfile,
"step_deployment_package": step_deployment_package,
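Since the renamed step is part of the default step list, an existing build flow only needs to pick the desired driver output. A minimal usage sketch, assuming build_dataflow_cfg and default_build_dataflow_steps behave as in upstream FINN and using a placeholder model file together with a configuration like the one sketched above:

import finn.builder.build_dataflow as build
from finn.builder.build_dataflow_config import default_build_dataflow_steps

# The renamed step replaces step_make_pynq_driver in the default step list.
assert "step_make_driver" in default_build_dataflow_steps

# Run the default flow; step_make_driver emits the PYNQ Python or C++ driver
# depending on generate_outputs in cfg ("model.onnx" is a placeholder path).
build.build_dataflow_cfg("model.onnx", cfg)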
120 changes: 120 additions & 0 deletions src/finn/transformation/fpgadataflow/get_driver_shapes.py
@@ -0,0 +1,120 @@
import numpy as np
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.custom_op.registry import getCustomOp
from qonnx.util.basic import gen_finn_dt_tensor, roundup_to_integer_multiple
from typing import Dict

import finn.util.data_packing as dpk
from finn.util.data_packing import (
hexstring2npbytearray,
pack_innermost_dim_as_hex_string,
)

# TODO: License?


def to_external_tensor(init, w_dtype):
"""Return an appropriately formatted and packed numpy byte array for given
external parameter tensor."""

weight_width = init.shape[1] * w_dtype.bitwidth()
weight_width_padded = roundup_to_integer_multiple(weight_width, 4)
hex_init = pack_innermost_dim_as_hex_string(init, w_dtype, weight_width_padded, prefix="0x")
ext_weight = np.array([], dtype=np.uint8)
for line in hex_init:
array_line = [x for x in reversed(hexstring2npbytearray(line, remove_prefix="0x"))]
ext_weight = np.append(ext_weight, array_line)

return ext_weight


def get_driver_shapes(model: ModelWrapper) -> Dict:
idt = []
idma_names = []
ishape_normal = []
ishape_folded = []
ishape_packed = []
for idma_ind, graph_in in enumerate(model.graph.input):
i_tensor_name = graph_in.name
# get inp tensor properties
i_tensor_dt = model.get_tensor_datatype(i_tensor_name)
i_tensor_shape_normal = tuple(model.get_tensor_shape(i_tensor_name))
# go down into dataflow partition to get folded shape info etc
# TODO consider setting these as attributes during dataflow partitioning
i_consumer = model.find_consumer(i_tensor_name)
assert (
i_consumer.op_type == "StreamingDataflowPartition"
), """
Ensure CreateDataflowPartition called before driver creation."""
first_df_model = ModelWrapper(getCustomOp(i_consumer).get_nodeattr("model"))
assert (
first_df_model.graph.node[0].op_type == "IODMA_hls"
), "First partition must hold input IODMA"
successors = model.find_direct_successors(i_consumer)
successor_input_num = list(successors[0].input).index(i_consumer.output[0])
successor_sdp = getCustomOp(successors[0])
successor_df_model = ModelWrapper(successor_sdp.get_nodeattr("model"))
first_node = successor_df_model.find_consumer(
successor_df_model.graph.input[successor_input_num].name
)
i_tensor_shape_folded = tuple(getCustomOp(first_node).get_folded_input_shape())
# generate dummy folded i/o tensors and their packed versions
i_tensor_dummy_folded = gen_finn_dt_tensor(i_tensor_dt, i_tensor_shape_folded)
i_tensor_dummy_packed = dpk.finnpy_to_packed_bytearray(i_tensor_dummy_folded, i_tensor_dt)
i_tensor_shape_packed = i_tensor_dummy_packed.shape
# append all input tensor info to relevant lists
idt.append("DataType['%s']" % i_tensor_dt.name)
ishape_normal.append(i_tensor_shape_normal)
ishape_folded.append(i_tensor_shape_folded)
ishape_packed.append(i_tensor_shape_packed)
idma_names.append(getCustomOp(i_consumer).get_nodeattr("instance_name"))

odt = []
odma_names = []
oshape_normal = []
oshape_folded = []
oshape_packed = []
for odma_ind, graph_out in enumerate(model.graph.output):
o_tensor_name = graph_out.name
# get output tensor properties
o_tensor_dt = model.get_tensor_datatype(o_tensor_name)
o_tensor_shape_normal = tuple(model.get_tensor_shape(o_tensor_name))
# go down into IODMA partition to get folded shape info etc
# TODO consider setting these as attributes during dataflow partitioning
o_producer = model.find_producer(o_tensor_name)
assert (
o_producer.op_type == "StreamingDataflowPartition"
), """
Ensure CreateDataflowPartition called before driver creation."""
df_model = ModelWrapper(getCustomOp(o_producer).get_nodeattr("model"))
assert df_model.graph.node[-1].op_type == "IODMA_hls", "Partition must hold output IODMA"
predecessors = model.find_direct_predecessors(o_producer)
predecessor_output_num = list(predecessors[0].output).index(o_producer.input[0])
predecessor_sdp = getCustomOp(predecessors[0])
predecessor_df_model = ModelWrapper(predecessor_sdp.get_nodeattr("model"))
last_node = predecessor_df_model.find_producer(
predecessor_df_model.graph.output[predecessor_output_num].name
)
o_tensor_shape_folded = tuple(getCustomOp(last_node).get_folded_output_shape())
o_tensor_dummy_folded = gen_finn_dt_tensor(o_tensor_dt, o_tensor_shape_folded)
o_tensor_dummy_packed = dpk.finnpy_to_packed_bytearray(o_tensor_dummy_folded, o_tensor_dt)
o_tensor_shape_packed = o_tensor_dummy_packed.shape
# append all output tensor info to relevant lists
odt.append("DataType['%s']" % o_tensor_dt.name)
oshape_normal.append(o_tensor_shape_normal)
oshape_folded.append(o_tensor_shape_folded)
oshape_packed.append(o_tensor_shape_packed)
odma_names.append(getCustomOp(o_producer).get_nodeattr("instance_name"))

return {
"idt": idt,
"idma_names": idma_names,
"ishape_normal": ishape_normal,
"ishape_folded": ishape_folded,
"ishape_packed": ishape_packed,
"odt": odt,
"odma_names": odma_names,
"oshape_normal": oshape_normal,
"oshape_folded": oshape_folded,
"oshape_packed": oshape_packed,
}
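A usage sketch (assumed, not part of the PR) for inspecting the metadata this helper collects: the model must already have gone through CreateDataflowPartition, as the assertions above require, and the file name is a placeholder.

from qonnx.core.modelwrapper import ModelWrapper

from finn.transformation.fpgadataflow.get_driver_shapes import get_driver_shapes

# Load a partitioned model and collect the shape/datatype info used by the drivers.
model = ModelWrapper("partitioned_model.onnx")  # placeholder path
shapes = get_driver_shapes(model)
print(shapes["idt"], shapes["ishape_normal"], shapes["ishape_folded"], shapes["ishape_packed"])
print(shapes["odt"], shapes["oshape_normal"], shapes["oshape_folded"], shapes["oshape_packed"])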