diff --git a/2024.1/html.tar.gz b/2024.1/html.tar.gz
deleted file mode 100644
index 9da6bd7da3c..00000000000
Binary files a/2024.1/html.tar.gz and /dev/null differ

diff --git a/2024.2/html/.buildinfo b/2024.2/html/.buildinfo
new file mode 100644
index 00000000000..f115f3173b7
--- /dev/null
+++ b/2024.2/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 33db1a10b22db3e7c58b1d90c8298992
+tags: 645f666f9bcd5a90fca523b33c5a78b7

New binary Sphinx doctree files added under 2024.2/html/.doctrees/ (one per documentation page):
build.doctree, cloud_vendor_support.doctree, debug-faq.doctree, environment.pickle, ert.main.doctree,
execution-model.doctree, formats.doctree, fpga_device_ready.doctree, hm.doctree, index.doctree,
install.doctree, m2m.doctree, mailbox.main.doctree, mailbox.proto.doctree, mgmt-ioctl.main.doctree,
multiprocess.doctree, nagios_plugin.doctree, newxsa-bringup.doctree, opencl_extension.doctree,
p2p.doctree, platforms.doctree, platforms_partitions.doctree, pyxrt.doctree, security.doctree,
sysfs.doctree, system_requirements.doctree, test.doctree, vsec.doctree, xball.doctree,
xbflash2.doctree, xbmgmt.doctree, xbtools_map.doctree, xbtop.doctree, xbutil.doctree,
xclbintools.doctree, xocl_ioctl.main.doctree, xrt_ini.doctree, xrt_kernel_executions.doctree,
xrt_native.main.doctree, xrt_native_apis.doctree, yocto.doctree, zocl_ioctl.main.doctree

New image assets added under 2024.2/html/_images/ (architecture and flow diagrams):
Alveo-Compilation-Flow.svg, M2M-transfer.svg, PCIe-P2P.svg, XRT-Architecture-Edge.svg,
XRT-Architecture-Hybrid.svg, XRT-Architecture-PCIe.svg, XRT-Architecture-Versal-Edge.svg,
XRT-Layers.svg, XSA-shell-partitions-1RP.svg, XSA-shell-partitions-2RP.svg, XSA-shell.svg,
ap_ctrl_chain_2.PNG, ap_ctrl_hs_2.PNG, graphviz-e3642d1d054f86a195c477303a09c70986745b9d.svg,
mailbox-msd-mpd-architecture.svg, pass-through.PNG, q35-0_LI.jpg, q35-1_LI.jpg,
sw-mailbox-mpd-plugin-download.svg, sw-mailbox-msd-mpd-download.svg, xclbin_navigation.png,
xrt-deployment-cloud.svg
diff --git a/2024.2/html/_sources/build.rst.txt b/2024.2/html/_sources/build.rst.txt
new file mode 100644
index 00000000000..7fab209565b

.. _build.rst:

..
   comment:: SPDX-License-Identifier: Apache-2.0
   comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved.

Building the XRT Software Stack
-------------------------------

Building the XRT Installation Package
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Installing Build Dependencies
.............................

XRT requires a C++14 compiler and a few development libraries bundled
with modern Linux distributions. Please install the necessary tools and
dependencies using the provided ``xrtdeps.sh`` script.

::

    sudo /src/runtime_src/tools/scripts/xrtdeps.sh

The ``xrtdeps.sh`` script installs the standard distribution packages
for the tools and libraries XRT depends on. If any system libraries
XRT depends on (for example the Boost libraries) are updated to
non-standard versions, then XRT must be rebuilt.

On RHEL 7.x/CentOS 7.x, use devtoolset to switch to a C++14 development
environment. This step is not applicable to Ubuntu, which already ships
a C++14-capable GCC.

::

    scl enable devtoolset-9 bash

XRT includes the source code for the ERT firmware. It needs to be
compiled with the MicroBlaze GCC compiler, which is available in the
Xilinx Vitis™ Software Platform. To generate a complete XRT package,
please install the Vitis™ Software Platform and set the XILINX_VITIS
environment variable. If XILINX_VITIS is not available on the build
system, the build and packaging steps for ERT are skipped. On the
deployment system, XRT looks for the ERT firmware in the
``/lib/firmware/xilinx`` directory; if it is not present there, errors
are reported.
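If the ERT firmware is wanted in the package, it can help to confirm before building
that the Vitis environment is actually visible to the build. The following is a minimal
sketch only; the Vitis install path and version are assumptions and should be replaced
with your own installation.

.. code-block:: bash

    # Source a Vitis install so XILINX_VITIS is set (path/version below are assumptions).
    source /tools/Xilinx/Vitis/2024.2/settings64.sh

    # build.sh only includes the ERT steps when XILINX_VITIS is set.
    echo "XILINX_VITIS=${XILINX_VITIS:-<not set>}"

    # The MicroBlaze cross compiler shipped with Vitis is used to build the ERT firmware.
    which mb-gcc || echo "mb-gcc not found; ERT build and packaging will be skipped"
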
Building the XRT Runtime
........................

::

    cd build
    ./build.sh

The ``build.sh`` script builds both the Debug and Release profiles.

On RHEL/CentOS, if ``build.sh`` was accidentally run prior to enabling
the devtoolset, then it is necessary to clean out the stale makefiles by
running ``build.sh clean`` prior to the next build.

Please check that the ERT firmware was built properly at
``build/Release/opt/xilinx/xrt/share/fw/sched*.bin``.


Packaging RPM on RHEL/CentOS or DEB on Ubuntu
.............................................

The package is built automatically for the ``Release`` profile but not
for the ``Debug`` profile. To package either profile manually::

    cd build/Release
    make package
    cd ../Debug
    make package


Building the XRT Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The XRT documentation can be built using the ``Sphinx`` doc builder
together with the Linux kernel's ``kernel-doc`` utility.

To compile and install the documentation into the ``doc`` directory at
the top of the repository::

    cd build
    ./build.sh docs
    # To browse the generated local documentation with a web browser:
    xdg-open Release/runtime_src/doc/html/index.html
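Putting the preceding steps together, here is a minimal end-to-end sketch for a
RHEL/CentOS 7.x build host. It only combines the commands shown above; the devtoolset
version and the decision to clean first are assumptions to adapt to your setup.

.. code-block:: bash

    # Enter a C++14-capable toolchain (RHEL/CentOS 7.x only).
    scl enable devtoolset-9 bash

    # Build the Debug and Release profiles; clean first in case of a stale build.
    cd build
    ./build.sh clean
    ./build.sh

    # Confirm the ERT firmware was produced (requires XILINX_VITIS at build time).
    ls Release/opt/xilinx/xrt/share/fw/sched*.bin

    # Generate the installable package from the Release tree.
    cd Release
    make package
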
diff --git a/2024.2/html/_sources/cloud_vendor_support.rst.txt b/2024.2/html/_sources/cloud_vendor_support.rst.txt
new file mode 100644
index 00000000000..9966ca13b25

.. _cloud_vendor_support.rst:

..
   comment:: SPDX-License-Identifier: Apache-2.0
   comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved.


MSD/MPD and Plugins
*******************

Overview
========

When FPGAs are deployed at cloud vendors, either VM based (IaaS) or container based (PaaS),
there are some common concerns that need to be addressed.

1. The MGMT PF and USER PF of the FPGA are separated

   Cloud vendors own the MGMT PF, while users own the USER PF. No operation by the user on the
   USER PF should damage or compromise the operation of the MGMT PF.

2. xclbin files need to be protected

   Some xclbin files are provided by third-party ISVs -- the ISVs do not want users to access
   their xclbin files directly, only to use them indirectly. That is, the xclbin files in the
   user VM or container are not the real ones running on the cards. Instead, they are fake ones
   with the BITSTREAM section stripped. Downloading a fake xclbin in the VM should result in the
   real one being programmed without the user noticing.

3. Cloud vendors want more control over the xclbin download process

   Downloading an xclbin involves communication between the VMs or containers and the hosts.
   Cloud vendors have their own trusted ways of doing this.

XRT addresses all of these concerns.

Mailbox, Message Service Daemon (MSD) and Message Proxy Daemon (MPD)
======================================================================

The following picture illustrates how an xclbin file is downloaded on a baremetal machine.

.. image:: mailbox-msd-mpd-architecture.svg
   :align: center

As shown by the red arrows in the picture, the download flow in this case is as follows:

1. The user requests an xclbin download from the ``xbutil program`` command line or the OpenCL API
2. The shim layer issues an ioctl to the xocl driver
3. The xocl driver icap subdev sends the request to the mailbox subdev (the xclbin file is in host memory, so the request itself is very small)
4. The mailbox transfers the request to its peer in xclmgmt through the HW mailbox
5. The xclmgmt driver mailbox subdev forwards the request to the icap subdev
6. The xclmgmt driver icap subdev receives the request, fetches the xclbin file from memory, and programs the ICAP
7. The response is sent back to the user along the reverse path

**Note:** *This model also works for containers running on top of baremetal if no special
requirement, i.e. xclbin protection, is needed.*

XRT keeps its xclmgmt and xocl drivers separate. The mailbox is the communication channel between
the two drivers, through which the user can perform some management tasks from the USER PF in the
VM, e.g. downloading an xclbin. However, the HW mailbox by design has very low bandwidth, which
makes transferring an xclbin file of a hundred megabytes very slow. The SW mailbox complements the
mailbox framework to overcome this, and it also helps in cases where there is no working HW mailbox.

The SW mailbox relies on MSD/MPD. MSD resides in the userspace of the machine (i.e. the host) where
the xclmgmt driver is installed, while MPD resides in the userspace of the machine (i.e. the VM)
where the xocl driver is installed. They talk to the mailbox subdev in the corresponding driver.
MSD and MPD may be connected through external networking, e.g. Ethernet, making the xclbin download
faster.

**Note:** *In order to use the SW mailbox, the cloud vendor has to set up the networking connection
between the host and the VM. This model provides a faster xclbin download, but it does not provide
xclbin protection.*

Once the networking connection is set up, the following configuration is also required.

.. code-block:: bash

    # In the host, make sure the IP address configured is the one the VM talks to
    Host>$ sudo xbmgmt config --show

    # If the IP address is not correct, change it by running
    Host>$ sudo xbmgmt config --daemon --host

    # Start MSD in the host
    Host>$ sudo systemctl start msd

    # Start MPD in the VM
    VM>$ sudo systemctl start mpd

    # Enable these services if you want them to remain active after a reboot
    Host>$ sudo systemctl enable msd

    VM>$ sudo systemctl enable mpd
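Once both daemons are started, it is worth confirming that they are actually running and that the
host-side configuration matches what the VM expects before attempting a download. The following is
a minimal sketch, reusing only the commands above plus standard systemd tooling.

.. code-block:: bash

    # On the host: confirm the daemon configuration (IP address) that MSD advertises.
    Host>$ sudo xbmgmt config --show

    # On the host: check that MSD is running and inspect its recent log output.
    Host>$ sudo systemctl status msd
    Host>$ sudo journalctl -u msd --since "10 min ago"

    # In the VM: check that MPD is running and connected.
    VM>$ sudo systemctl status mpd
    VM>$ sudo journalctl -u mpd --since "10 min ago"
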
Xclmgmt driver subdev icap receives the req, and programs the icap +9. Response is sent back to the user following the reverse way + + +The mailbox(HW&SW) and MSD/MPD framework perfectly addresses the 1st concern mentioned above + +Enhancement to MPD +================== + +MSD/MPD are mailbox message centric. They focus on the delivering of the mailbox message and don't interpret them. +In order to protect the xclbin file, in which case users feed fake xclbin files to xocl then plugins get real ones +and re-feed to xclmgmt, MSD/MPD have to interpret and understand the download xclbin message. An enhancement to MPD +interprets the mailbox message and calls into vendor specific plugin to download the xclbin + +The input to the plugin is the xclbin file fed by the user in VM or container -- it may be a fake xclbin file. The +plugin calls cloud vendor specific APIs to do the real download. It is the cloud vendor responsibility to, + +1. Save the real xclbin files in a dedicated database +2. Retrieve the real xclbin from fake one +3. Ascertain the legality of the download itself +4. Talk to the MGMT PF (xclmgmt driver) to download the real xclbin + +**Note:** *In this model, the cloud vendor APIs don't know anything about mailbox. They talk to ICAP through ioctl directly. So +MSD is not being used* + +The flow of downloading protected xclbin through plugin is illustrated as below: + +.. image:: sw-mailbox-mpd-plugin-download.svg + :align: center + +The vendor private part shown in the picture needs to, + +1. Provide database to save real xclbin files +2. Provide download API to MPD plugin +3. Check the legality of the download + i. whether the user is authorized + ii. whether the xclbin is valid + iii. whether the FPGA owned by the user + iv. etc +4. Retrieve the real xclbin +5. Download the retrieved xclbin + +The enhancement to the MPD and the plugin address the 2nd and 3rd concerns mentioned above + +Example MPD plugin +================== + +The example plugin aims at containers running on top of baremetal machines. In this case, both MGMT PF and USER PF are in the same +domain, so plugin can call ioctl on xclmgmt directly to program ICAP after it retrieves the real xclbin. This is the use case +for Nimbix + +The plugin is built as shared object -- libcontainer_mpd_plugin.so, and when users install the container pkg, the 'so' file +will be installed at /opt/xilinx/xrt/lib, and a soft link file -- libmpd_plugin.so is created under the same folder +linking to the plugin shared object. MPD tries to dlopen(3) the shared object when it gets started + +This delivered container plugin by default just uses the input xclbin file as output(that means no xclbin protection), +show-casing how this plugin is going to be implemented. It does have example code how to save real xclbin, how to retrieve +real xclbin from fake one, and how to download a protected xclbin, as user's reference + +This plugin can also be used for internal test on the MPD and mailbox + +Example how a ubuntu host of containers configures the plugin + +.. code-block:: bash + + # install xrt pkg + $ sudo apt install /opt/xrt_201920.2.3.0_18.04-xrt.deb + + # install xrt pkg + $ sudo apt install /opt/xrt_201920.2.3.0_18.04-container.deb + + # config mailbox channel switch + # this has to be manually configurated to ensure download xclbin going through SW mailbox + $ sudo echo 0x100 > /sys/bus/pci/devices/0000\:65\:00.0/config_mailbox_channel_switch + + # When cloud vendor (eg. 
Summary
=======

With the MSD/MPD framework and the MPD enhancement,

1. The same XRT package is installed everywhere -- baremetal, IaaS, PaaS, etc. Vendors only need
   to create and install their specific plugins
2. Users have the same Xilinx® FPGA experience everywhere -- they do not even know whether they
   are running on baremetal, in a VM, or in a container, nor whether the xclbin files they see
   are real ones, fake ones, or anything else

The following picture illustrates how XRT is deployed in different scenarios at cloud vendors.

.. image:: xrt-deployment-cloud.svg
   :align: center

Special Case
============

There is a special case where downloading an xclbin from within the user VM is not required.

In this special case, neither MSD/MPD nor plugins are required, since no xclbin download is allowed
from the guest. xclbins can be preloaded either by the hypervisor or by a dom0-type VM to which the
mgmt PF is assigned. The applications in the user VM run without any change: the xclbin download
ioctl is still issued to the xocl driver, the xocl driver fetches the UUID of the preloaded xclbin
from xclmgmt with an XCL_MAILBOX_REQ_PEER_DATA mailbox opcode, and if that UUID matches the UUID of
the xclbin requested for download, the ioctl returns immediately with success. If the UUIDs do not
match, the download request in the guest fails.

Note
====

Some machine configurations prevent TCP connections. Users should update the configuration to allow
TCP connections. One such configuration is the firewall, which can be disabled or enabled:

1. firewall disable command: ``ufw disable``
2. firewall enable command: ``ufw enable``

diff --git a/2024.2/html/_sources/debug-faq.rst.txt b/2024.2/html/_sources/debug-faq.rst.txt
new file mode 100644
index 00000000000..aabe4e7bf6b

..
   comment:: SPDX-License-Identifier: Apache-2.0
   comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved.

XRT/Board Debug FAQ
-------------------

Debugging failures on board runs can be a daunting task that often requires *tribal knowledge* to
be effective. This document attempts to capture the tricks of the trade to help reduce debug cycles
for all users. It is a living document and will be continuously updated.

Tools of the Trade
~~~~~~~~~~~~~~~~~~

``dmesg``
   Capture the Linux kernel and XRT driver logs
``strace``
   Capture a trace of the system calls made by an XRT application
``gdb``
   Capture a stack trace of an XRT application
``lspci``
   Enumerate Xilinx® PCIe devices
``xbutil``
   Query the status of a Xilinx® PCIe device
``xclbinutil``
   Retrieve information from an xclbin
XRT API Trace
   Run the failing application with XRT logging enabled in the ``xrt.ini`` file::

      [Runtime]
      runtime_log=my_run.log
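A quick way to apply these tools together is to snapshot the driver and device state around a
failing run while an XRT API trace is enabled. The following is a minimal sketch; the output file
names and ``./my_app`` are placeholders for your own application.

.. code-block:: bash

    # Snapshot driver and device state.
    dmesg | grep -iE 'xocl|xclmgmt' > dmesg-xrt.log    # kernel/driver messages
    lspci -v -d 10ee: > lspci.log                      # Xilinx PCIe devices
    xbutil examine > xbutil-examine.log                # XRT view of the device

    # Enable XRT API tracing for the run (xrt.ini is read from the working directory).
    cat > xrt.ini <<'EOF'
    [Runtime]
    runtime_log=my_run.log
    EOF

    ./my_app    # placeholder for the failing application
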
So for Xilinx® boards use ``lspci`` utility :: + + lspci -v -d 10ee: + + Check if XRT can see the board and reports sane values :: + + xbutil examine + +XSA Sanity Test + Card validation on kernel, bandwidth, dmatest and etc. (--device for pointing a specific board) :: + + xbutil validate --device + + Check DDR and PCIe bandwidth :: + + xbutil validate --device --run dma + +Common Reasons For Failures +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Incorrect Memory Topology Usage +............................... + +5.0+ XSAs are considered dynamic platforms which use sparse connectivity between acceleration kernels and memory controllers (MIGs). This means that a kernel port can only read/write from/to a specific MIG. This connectivity is frozen at xclbin generation time in specified in mem_topology section of xclbin. The host application needs to ensure that it uses the correct memory banks for buffer allocation using cl_mem_ext_ptr_t for OpenCL applications. For XRT native applications the bank is specified when allocating buffer using ``xrt::kernel::group_id()``. + +If an application is producing incorrect results it is important to review the host code to ensure that host application and xclbin agree on memory topology. One way to validate this at runtime is to enable XRT logging in ``xrt.ini`` and then carefully go through all buffer allocation requests. + +Memory Read Before Write +........................ + +Read-Before-Write in 5.0+ XSAs will cause MIG *ECC* error. This is typically a user error. For example if user expects a kernel to write 4KB of data in DDR but it produced only 1KB of data and now the user tries to transfer full 4KB of data to host. It can also happen if user supplied 1KB sized buffer to a kernel but the kernel tries to read 4KB of data. Note ECC read-before-write error occurs if -- since the last bitstream download which results in MIG initialization -- no data has been written to a memory location but a read request is made for that same memory location. ECC errors stall the affected MIG since usually kernels are not able to handle this error. This can manifest in two different ways: + +1. CU may hang or stall because it does not know how to handle this error while reading/writing to/from the affected MIG. ``xbutil examine --device --report dynamic-regions`` will show that the CU is stuck in *BUSY* state and not making progress. +2. AXI Firewall may trip if PCIe DMA request is made to the affected MIG as the DMA engine will be unable to complete request. AXI Firewall trips result in the Linux kernel driver killing all processes which have opened the device node with *SIGBUS* signal. ``xbutil examine --device --report firewall`` will show if an AXI Firewall has indeed tripped including its timestamp. + +Users should review the host code carefully. One common example is compression where the size of the compressed data is not known upfront and an application may try to migrate more data to host than was produced by the kernel. + +Incorrect Frequency Scaling +........................... + +Incorrect frequency scaling usually indicates a tooling or +infrastructure bug. Target frequencies for the dynamic (partial +reconfiguration) region are frozen at compile time and specified in +``clock_freq_topology`` section of ``xclbin``. If clocks in the dynamic region +are running at incorrect — higher than specified — frequency, +kernels will demonstrate weird behavior. + +1. Often a CU will produce completely incorrect result with no identifiable pattern +2. A CU might hang +3. 
When run several times, a CU may produce correct results a few times and incorrect results rest of the time +4. A single CU run may produce a pattern of correct and incorrect result segments. Hence for a CU which produces a very long vector output (e.g. vector add), a pattern of correct — typically 64 bytes or one AXI burst — segment followed by incorrect segments are generated. + +Users should check the frequency of the board with ``xbutil examine --device --report platform`` and compare it against the metadata in xclbin. ``xclbinutil`` may be used to extract metadata from xclbin. + +CU Deadlock +........... + +HLS scheduler bugs can also result in CU hangs. CU deadlocks AXI data bus at which point neither read nor write operation can make progress. The deadlocks can be observed with ``xbutil examine --device --report dynamic-regions`` where the CU will appear stuck in *START* or *---* state (can also be observed through debug-ip using the command ``xbutil examine --device --report debug-ip-status``). Note this deadlock can cause other CUs which read/write from/to the same MIG to also hang. + + +AXI Bus Deadlock +................ + +AXI Bus deadlocks can be caused by `Memory Read Before Write`_ or `CU Deadlock`_ described above. These usually show up as CU hang and sometimes may cause AXI FireWall to trip. Run ``xbutil examine --device --report dynamic-regions`` and ``xbutil examine --device --report firewall`` to check if CU is stuck in *START* or *--* state or if one of the AXI Firewall has tripped. + + +Platform Bugs +............. + +Bitsream Download Failures + Bitstream download failures are usually + caused because of incompatible xclbin(s). ``dmesg`` log would + provide more insight into why the download failed. At OpenCL level + they usually manifest as Invalid Binary (error -44). + + Rarely MIG calibration might fail after bitstream download. This + will also show up as bitstream download failure. Usually XRT driver + messages in ``dmesg`` would reveal if MIG calibration failed. + +Incorrect Timing Constraints + If the platform or dynamic region has invalid timing constraints — which is really a platform or Vitis tool bug — CUs would show bizarre behaviors. This may result in incorrect outputs or CU/application hangs. + +Board in Crashed State +~~~~~~~~~~~~~~~~~~~~~~ + +When board is in crashed state PCIe read operations start returning +``0XFF``. In this state ``xbutil examine`` would show bizarre +metrics. For example ``Temp`` would be very high. Boards in crashed state +may be recovered with PCIe hot reset :: + + xbutil reset + +If this does not recover the board perform a warm reboot. After reset/reboot please follow steps in `Validating a Working Setup`_ + +If for some reason communication between xocl driver and management driver gets disrupted, ``xbutil reset`` may not be successful to reset the board. In those cases the following steps are recommended with the help of the sysadmin who has the root previledge + +1) unload xocl driver (also shut down VM if xocl is running on a VM) +2) Run ``xbmgmt reset`` + +XRT Scheduling Options +~~~~~~~~~~~~~~~~~~~~~~ + +XRT has three kernel execution schedulers today: ERT, KDS and legacy. By default XRT uses ERT which runs on Microblaze. ERT is accessed through KDS which runs inside xocl Linux kernel driver. If ERT is not available KDS uses its own built-in scheduler. From 2018.2 release onwards KDS (tgether with ERT if available in the XSA) is enabled by default. Users can optionally switch to legacy scheduler which runs in userspace. 
Switching scheduler will help isolate any scheduler related XRT bugs :: + + [Runtime] + ert=false + kds=false + +Writing Good Bug Reports +~~~~~~~~~~~~~~~~~~~~~~~~ + +When creating bug reports please include the following: + +1. Output of ``dmesg`` +2. Output of ``xbutil examine --device --report all`` +3. Application binaries: xclbin, host executable and code, any data files used by the application +4. XRT version +5. XSA name and version diff --git a/2024.2/html/_sources/ert.main.rst.txt b/2024.2/html/_sources/ert.main.rst.txt new file mode 100644 index 00000000000..4b09f33035b --- /dev/null +++ b/2024.2/html/_sources/ert.main.rst.txt @@ -0,0 +1,10 @@ +.. _ert.main.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Embedded Runtime Library +************************ + +.. include:: ../core/ert.rst diff --git a/2024.2/html/_sources/execution-model.rst.txt b/2024.2/html/_sources/execution-model.rst.txt new file mode 100644 index 00000000000..133c524fd66 --- /dev/null +++ b/2024.2/html/_sources/execution-model.rst.txt @@ -0,0 +1,67 @@ +.. _execution-model.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Execution Model Overview +************************ + +Introduction +============ +Xilinx® FPGA based acceleration platform architecture are described in the :doc:`platforms` document. On *Alveo PCIe* platforms *xocl* driver binds to user physical function and *xclmgmt* driver binds to management physical function. The ioctls exported by xocl are described in :doc:`xocl_ioctl.main` document and ioctls exported by xclmgmt are described in :doc:`mgmt-ioctl.main` document. On *Zynq Ultrascale+ MPSoC* platforms *zocl* driver binds to the accelerator. The ioctls exported by zocl are listed here TODO. + +Image Download +============== + +Xilinx® Vitis compiler, v++ compiles user's device code into xclbin file which contains FPGA bitstream and collection of metadata like memory topology, IP instantiations, etc. xclbin format is defined in :doc:`formats` document. For Alveo platforms xclmgmt driver provides an ioctl for xclbin download. For Zynq Ultrascale+ MPSoC zocl provides a similar ioctl for xclbin download. Both drivers support FPGA Manager integration. The drivers walk the xclbin sections, program the FPGA fabric, discover the memory topology, initialize the memory managers for the provided memory topology and discover user's compute units programmed into the FPGA fabric. + +Memory Management +================= + +Both PCIe based and embedded platforms use a unified multi-thread/process capable memory management API defined in :doc:`xrt.main` document. + +For both class of platforms, memory management is performed inside Linux kernel driver. Both drivers use DRM GEM for memory management which includes buffer allocator, buffer mmap support, reference counting of buffers and DMA-BUF export/import. These operations are made available via ioctls exported by the drivers. + +xocl +---- + +Xilinx® PCIe platforms like Alveo PCIe cards support various memory topologies which can be dynamically loaded as part of FPGA image loading step. This means from one FPGA image to another the device may expose one or more memory controllers where each memory controller has its own memory address range. We use Linux *drm_mm* for allocation of memory and Linux *drm_gem* framework for mmap handling. 
Since ordinarily our device memory is not exposed to host CPU (except when we enable PCIe peer-to-peer feature) we use host memory pages to back device memory for mmap support. For syncing between device memory and host memory pages XDMA/QDMA PCIe memory mapped DMA engine is used. Users call sync ioctl to effect DMA in requested direction. + +xocl also supports PCIe Host Memory Bridge where it handles pinning of host memory and programming the Address Remapper tables. Section :doc:`hm` provides more information. + +zocl +---- + +Xilinx® embedded platforms like Zynq Ultrascale+ MPSoC support various memory topologies as well. In addition to memory shared between PL (FPAG fabric) and PS (ARM A-53) we can also have dedicated memory for PL using a soft memory controller that is instantiated in the PL itself. zocl supports both CMA backed memory management where accelerators in PL use physical addresses and SVM based memory management -- with the help of ARM SMMU -- where accelerators in PL use virtual addresses also shared with application running on PS. + +Execution Management +==================== + +Both xocl and zocl support structured execution framework. After xclbin has been loaded by the driver compute units defined by the xclbin are live and ready for execution. The compute units are controlled by driver component called Kernel Domain Scheduler (KDS). KDS queues up execution tasks from client processes via ioctls and then schedules them on available compute units. Both drivers export an ioctl for queuing up execution tasks. + +User space submits execution commands to KDS in well defined command packets. The commands are defined in :doc:`ert.main` + +KDS notifies user process of a submitted execution task completion asynchronously via POSIX poll mechanism. On PCIe platforms KDS leverages hardware scheduler running on Microblaze soft processor for fine control of compute units. Compute units use interrupts to notify xocl/zocl when they are done. KDS also supports polling mode where KDS actively polls the compute units for completion instead of relying on interrupts from compute units. + +On PCIe platforms hardware scheduler (referred to above) runs firmware called Embedded Runtime (ERT). ERT receives requests from KDS on hardware out-of-order Command Queue with upto 128 command slots. ERT notifies KDS of work completion by using bits in Status Register and MSI-X interrupts. ERT source code is also included with XRT source on GitHub. + +Board Management +================ + +For Alveo boards xclmgmt does the board management like board recovery in case compute units hang the data bus, sensor data collection, AXI Firewall monitoring, clock scaling, power measurement, loading of firmware files on embedded soft processors like ERT and CMC. + +Execution Flow +============== + +A typical user execution flow would like the following: + +1. Load xclbin using DOWNLOAD ioctl +2. Discover compute unit register map from xclbin +3. Allocate data buffers to feed to the compute units using CREATE_BO/MAP_BO ioctl calls +4. Migrate input data buffers from host to device using SYNC_BO ioctl +5. Allocate an execution command buffer using CREATE_BO/MAP_BO ioctl call and fill the command buffer using data in 2 above and following the format defined in ert.h +6. Submit the execution command buffer using EXECBUF ioctl +7. Wait for completion using POSIX poll +8. Migrate output data buffers from device to host using SYNC_BO ioctl +9. 
Release data buffers and command buffer
diff --git a/2024.2/html/_sources/formats.rst.txt b/2024.2/html/_sources/formats.rst.txt new file mode 100644 index 00000000000..425909a2a91 --- /dev/null +++ b/2024.2/html/_sources/formats.rst.txt @@ -0,0 +1,53 @@
+.. _formats.rst:
+
+..
+  comment:: SPDX-License-Identifier: Apache-2.0
+  comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved.
+
+Binary Formats
+--------------
+
+xclbin
+~~~~~~
+
+The **xclbin** container format (also known as AXLF) is defined in the file
+``xclbin.h``. The file uses **xclbin2** as the magic word. AXLF is a
+section-based, extensible container. Different sections store different
+parts of the compiled application, such as bitstreams for the PL (FPGA fabric) and ELF
+for AIE tiles and embedded processors like Microblaze. It also contains
+well structured metadata to define memory topology, IP layout of instantiated
+peripherals and compute kernels, clocking details and kernel connectivity
+for each compute kernel.
+
+The compiler generates a unique xclbin file tagged with a UUID for every application
+compiled. Each xclbin also has another UUID which defines its compatibility with
+the Shell. The Vitis compiler, v++, generates this file as part of the linking stage. End-users
+load this file via the XRT xclLoadXclbin() API. XRT userspace and kernel space
+components consume different sections of the xclbin by programming the hardware
+and initializing key data structures in XRT userspace libraries and XRT
+kernel drivers.
+
+xclbins can also be signed. More information can be found in :doc:`security`.
+
+The path to ``xclbin.h`` is ``xrt/include/xclbin.h`` inside the XRT
+installation directory.
+
+XRT provides a very powerful utility, **xclbinutil**, which can be used to read/write/change
+xclbins. More information can be found in the section on :doc:`xclbintools`
+
+Feature ROM
+~~~~~~~~~~~
+
+Feature ROM is a BIOS-like table for the FPGA which describes key
+properties of the device, such as its name and the features enabled in the
+Shell of the platform. The format for the data in Feature ROM is
+defined in the file ``xclfeatures.h``. It is a section of memory-mapped BRAM
+which can be used for data sharing, error checking and
+functionality discovery in Alveo platforms. Vivado tools
+programmatically capture and populate the BRAM memory in the platform
+at platform creation time. Runtime components like drivers read it,
+enable functionality in the driver, and also use the information to
+perform hardware/software compatibility checks.
+
+The path to ``xclfeatures.h`` is ``xrt/include/xclfeatures.h`` inside the
+XRT installation directory.
diff --git a/2024.2/html/_sources/fpga_device_ready.rst.txt b/2024.2/html/_sources/fpga_device_ready.rst.txt new file mode 100644 index 00000000000..5d612cfe5e0 --- /dev/null +++ b/2024.2/html/_sources/fpga_device_ready.rst.txt @@ -0,0 +1,61 @@
+.. _fpga_device_ready.rst:
+
+..
+  comment:: SPDX-License-Identifier: Apache-2.0
+  comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved.
+
+
+FPGA device readiness within user VM
+************************************
+
+To make the FPGA in the user VM ready to use, the mgmt side has to be ready first. Besides that, depending on how
+the FPGA is deployed, there are other factors to be considered.
+
+With MSD/MPD or Plugins used
+============================
+
+In this case, :doc:`mailbox.main` has to be opened from the guest VM, and the MPD service must be active in the guest VM.
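+
+A quick way to confirm this on a systemd-based guest is sketched below. It assumes the MPD daemon is registered as the
+``mpd`` systemd service (the same unit name used by ``systemctl restart mpd`` elsewhere in this documentation); the name
+may differ on other setups.
+
+.. code-block:: bash
+
+    # check whether the MPD daemon is active in the guest VM
+    $ sudo systemctl status mpd
+
+    # if it is not active, start it and enable it across reboots
+    $ sudo systemctl enable --now mpd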
+ +For MSD/MPD, the MSD service must be active also either from hypervisor or dom0 type VM where the mgmt PF of the FPGA +is assigned. + +For Plugins, the vendor specific plugin package has to be installed + +Without MSD/MPD or Plugins used +=============================== + +In this case, :doc:`mailbox.main` is still being used, but there is no open to the mailbox subdevice from user, +and MPD service is inactive. +To make the FPGA device ready, the XCL_MAILBOX_REQ_LOAD_XCLBIN opcode has to be disabled in xclmgmt. +System administrator managing the privileged management physical function driver xclmgmt can disable specific opcodes +using xbmgmt utility as follows. + +.. code-block:: bash + + # In host + Host>$ sudo xbmgmt dump --config --output /tmp/config.ini -d bdf + + # Edit the dumped ini file and change the value to key 'mailbox_channel_disable' + # mailbox_channel_disable=0x100, + # where 0x100 is 1 << XCL_MAILBOX_REQ_LOAD_XCLBIN, as defined as below + # XCL_MAILBOX_REQ_UNKNOWN = 0, + # XCL_MAILBOX_REQ_TEST_READY = 1, + # XCL_MAILBOX_REQ_TEST_READ = 2, + # XCL_MAILBOX_REQ_LOCK_BITSTREAM = 3, + # XCL_MAILBOX_REQ_UNLOCK_BITSTREAM = 4, + # XCL_MAILBOX_REQ_HOT_RESET = 5, + # XCL_MAILBOX_REQ_FIREWALL = 6, + # XCL_MAILBOX_REQ_LOAD_XCLBIN_KADDR = 7, + # XCL_MAILBOX_REQ_LOAD_XCLBIN = 8, + # XCL_MAILBOX_REQ_RECLOCK = 9, + # XCL_MAILBOX_REQ_PEER_DATA = 10, + # XCL_MAILBOX_REQ_USER_PROBE = 11, + # XCL_MAILBOX_REQ_MGMT_STATE = 12, + # XCL_MAILBOX_REQ_CHG_SHELL = 13, + # XCL_MAILBOX_REQ_PROGRAM_SHELL = 14, + # XCL_MAILBOX_REQ_READ_P2P_BAR_ADDR = 15, + + Host>$ vi /tmp/config.ini + + # Load config + Host>$ xbmgmt advanced --load-conf --input=/tmp/config.ini -d bdf diff --git a/2024.2/html/_sources/hm.rst.txt b/2024.2/html/_sources/hm.rst.txt new file mode 100644 index 00000000000..32dda91409a --- /dev/null +++ b/2024.2/html/_sources/hm.rst.txt @@ -0,0 +1,184 @@ +.. _hm.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2022 Xilinx, Inc. All rights reserved. + +Host Memory Access +================== + +Some of the recent Alveo cards support direct host memory access by the kernel(s) to read/write data directly from/to the host memory. Unlike the XDMA data transfer, this data transfer mechanism does not utilize global memories (DDR, HBM, PLRAM, etc) on the card. This feature provides DMA bypass capability that is primarily used for data transfer on a No-DMA platform. + + +Kernel Compilation +------------------ + +Use the following V++ configuration option to configure kernel port's connectivity to allow drive data through the AXI bridge to the host memory. + +.. code-block:: bash + + [connectivity] + ## Syntax + sp=my_kernel_1.m_axi_gmem:HOST[0] + + +Host Server Setup +----------------- + +To enable host memory access functionality the following settings are required from the host + +Hugepage Requirement +~~~~~~~~~~~~~~~~~~~~ + +If the kernel requirement of the host memory is more than 1GB, XRT allocates multiple Hugepages from the host memory. These hugepages are internally remapped (inside the FPGA shell) so that kernel can see a large contiguous bank-like memory. + + +**Steps required to enable Hugepages (Linux)** + + 1. Modify grub setup by changing the following line of the file ``/etc/default/grub`` + + `GRUB_CMDLINE_LINUX_DEFAULT="splash quiet noresume hugepagesz=1G hugepages=4"` + + 2. shell>update-grub + + 3. Reboot the server + + 4. Verify the HugePage setting + +.. 
code-block:: bash + + shell>hugeadm --pool-list + + Size Minimum Current Maximum Default + 2097152 0 0 0 * + 1073741824 4 4 4 + + +The following table can be used to determine the number of Hugepages required based on the host memory requirement + ++-------------------------+-----------------------------+ +| Host Mem Requirement | Huge Page Setting | ++=========================+=============================+ +| M <= 1GB | Not Required | ++-------------------------+-----------------------------+ +| M>1GB and M<=2GB | No of 1G HugePages = 2 | ++-------------------------+-----------------------------+ +| M>2GB and M<=4GB | No of 1G HugePages = 4 | ++-------------------------+-----------------------------+ +| M>4GB and M<=8GB | No of 1G HugePages = 8 | ++-------------------------+-----------------------------+ +| M>8GB and M<=16GB | No of 1G HugePages = 16 | ++-------------------------+-----------------------------+ + +Enabling the Host Memory by XRT +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Irrespective of the Hugepages settings, ``xbutil configure --host-mem`` command must be used to reserve the host memory for the kernel. This has to be done upfront before the XCLBIN download. In the example below, ``sudo xbutil configure --host-mem -d `` command is used to reserve 1G, 4G, and 16G host memory respectively for 3 cards. + +.. code-block:: bash + + sudo xbutil configure --host-mem -d 0000:a6:00.1 --size 1G enable + sudo xbutil configure --host-mem -d 0000:73:00.1 --size 4G enable + sudo xbutil configure --host-mem -d 0000:17:00.1 --size 16G enable + + +Maximum Host memory supported by the platform +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For the platform supporting the host memory access feature, we can observe the following two data from the ``xbutil examine -r pcie-info -d `` output + + - **Max Shared Host Memory**: The maximum host memory supported by the platform. + - **Shared Host Memory**: The host memory specified for this card (by ``xbutil configure --host-mem``) + +Assuming the platform supported maximum host memory is 16GB, the following output will be observed when the card is configured for 1GB host memory, no xclbin loaded. + +.. code-block:: bash + + shell>>sudo xbutil configure --host-mem -d 0000:17:00.1 --size 1G enable + + Host-mem enabled successfully + shell>>xbutil examine -r pcie-info -d 0000:17:00.1 + + ----------------------------------------------- + 1/1 [0000:a6:00.1] : xilinx_u250_gen3x16_xdma_shell_3_1 + ----------------------------------------------- + Pcie Info + Vendor : 0x10ee + Device : 0x5005 + Sub Device : 0x000e + Sub Vendor : 0x10ee + PCIe : Gen3x16 + DMA Thread Count : 2 + CPU Affinity : 16-31,48-63 + Shared Host Memory : 1 GB + Max Shared Host Memory : 0 Byte + Enabled Host Memory : 0 Byte + +When you load an xclbin with the host mem support, the ``Max Shared Host Mem`` gets populated. + +.. code-block:: bash + + shell>>xbutil examine -r pcie-info -d 0000:17:00.1 + + ----------------------------------------------- + 1/1 [0000:a6:00.1] : xilinx_u250_gen3x16_xdma_shell_3_1 + ----------------------------------------------- + Pcie Info + Vendor : 0x10ee + Device : 0x5005 + Sub Device : 0x000e + Sub Vendor : 0x10ee + PCIe : Gen3x16 + DMA Thread Count : 2 + CPU Affinity : 16-31,48-63 + Shared Host Memory : 1 GB + Max Shared Host Memory : 16 GB + Enabled Host Memory : 0 Byte + +Finally, when you run an application which exercises HOST[0], ``Enabled Host Memory`` is populated. + +.. 
code-block:: bash + + shell>>xbutil examine -r pcie-info -d 0000:17:00.1 + + ----------------------------------------------- + 1/1 [0000:a6:00.1] : xilinx_u250_gen3x16_xdma_shell_3_1 + ----------------------------------------------- + Pcie Info + Vendor : 0x10ee + Device : 0x5005 + Sub Device : 0x000e + Sub Vendor : 0x10ee + PCIe : Gen3x16 + DMA Thread Count : 2 + CPU Affinity : 16-31,48-63 + Shared Host Memory : 1 GB + Max Shared Host Memory : 16 GB + Enabled Host Memory : 1 GB + +Host code Guideline +------------------- + +XRT OpenCL introduces a new buffer extension Flag ``XCL_MEM_EXT_HOST_ONLY`` that should be used to denote a Host-only buffer + +.. code-block:: c++ + + cl_mem_ext_ptr_t host_buffer_ext; + host_buffer_ext.flags = XCL_MEM_EXT_HOST_ONLY; + host_buffer_ext.obj = NULL; + host_buffer_ext.param = 0; + + cl::Buffer buffer_in (context,CL_MEM_READ_ONLY |CL_MEM_EXT_PTR_XILINX, size, &host_buffer_ext); + cl::Buffer buffer_out(context,CL_MEM_WRITE_ONLY |CL_MEM_EXT_PTR_XILINX, size, &host_buffer_ext); + +In XRT Native APIs the ``xrt::bo`` object should be created with the flag ``xrt::bo::flags::host_only`` as shown in the example below + +.. code-block:: c++ + + xrt::bo buffer_in (device, size, xrt::bo::flags::host_only, kernel.group_id(0)); + xrt::bo buffer_out(device, size, xrt::bo::flags::host_only, kernel.group_id(1)); + +Also ensure to follow coding guideline as stated below + + - Let XRT allocate the buffer as shown in the above code examples. Do not create a buffer from an already created user-space memory. The host code should map the buffer object to the user-space for read/write operation. + - Regular data transfer APIs (OpenCL: ``clEnqueueMigramemObjects``/``clEnqueueWriteBuffer``, XRT Native API: ``xrt::bo::sync()``) should be used. Though these API will not do any DMA operation, they are used for Cache Invalidate/Flush as the application works on the Cache memory. diff --git a/2024.2/html/_sources/index.rst.txt b/2024.2/html/_sources/index.rst.txt new file mode 100644 index 00000000000..6c4efdef709 --- /dev/null +++ b/2024.2/html/_sources/index.rst.txt @@ -0,0 +1,119 @@ +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + comment:: Copyright (C) 2022 Advanced Micro Devices, Inc. All rights reserved. + +================================== +Xilinx® Runtime (XRT) Architecture +================================== + +Xilinx® Runtime (XRT) is implemented as a combination of userspace and kernel +driver components. XRT supports both PCIe based accelerator cards and MPSoC +based embedded architecture provides standardized software interface to Xilinx® +FPGA. The key user APIs are defined in ``xrt.h`` header file. + +.. image:: XRT-Layers.svg + :align: center + +---------------------------------------------------------------------------- + +.. toctree:: + :maxdepth: 1 + :caption: Introduction + + platforms.rst + system_requirements.rst + build.rst + install.rst + test.rst + + +.. toctree:: + :maxdepth: 1 + :caption: Use Model and Features + + execution-model.rst + xrt_kernel_executions.rst + multiprocess.rst + p2p.rst + m2m.rst + hm.rst + xrt_ini.rst + +.. toctree:: + :maxdepth: 1 + :caption: User API Library + + opencl_extension.rst + xrt_native_apis.rst + xrt_native.main.rst + + +.. toctree:: + :caption: XRT Developer's Space + :maxdepth: 1 + + platforms_partitions.rst + sysfs.rst + formats.rst + ert.main.rst + mgmt-ioctl.main.rst + xocl_ioctl.main.rst + zocl_ioctl.main.rst + + + +.. 
toctree:: + :caption: Tools and Utilities + :maxdepth: 1 + + xclbintools.rst + xbutil.rst + xbmgmt.rst + xbflash2.rst + xball.rst + xbtop.rst + xbtools_map.rst + nagios_plugin.rst + + +.. toctree:: + :caption: Building Platforms + :maxdepth: 1 + + yocto.rst + test.rst + + +.. toctree:: + :caption: Cloud Support + :maxdepth: 1 + + mailbox.main.rst + mailbox.proto.rst + cloud_vendor_support.rst + fpga_device_ready.rst + vsec.rst + + +.. toctree:: + :caption: Security + :maxdepth: 1 + + security.rst + +.. toctree:: + :caption: Python binding + :maxdepth: 1 + + pyxrt.rst + +.. toctree:: + :caption: Debug and Faqs + :maxdepth: 1 + + debug-faq.rst + +---------------------------------------------------------------------------- + +For any questions on XRT please email runtime@xilinx.com diff --git a/2024.2/html/_sources/install.rst.txt b/2024.2/html/_sources/install.rst.txt new file mode 100644 index 00000000000..163ee91371c --- /dev/null +++ b/2024.2/html/_sources/install.rst.txt @@ -0,0 +1,81 @@ +.. _install.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +XRT Installation +---------------- + +Install XRT Pre-requests on Deployment Server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +XRT requires EPEL to install dependencies during installation process. Please use the folloing steps to install EPEL on your system if it hasn't been installed. + +.. Warning:: If it's on the XRT build server, EPEL should have been installed by ``xrtdeps.sh``. This step can be skipped. + +Steps for RHEL 7.x:: + + sudo yum-config-manager --enable rhel-7-server-optional-rpms + sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + +Steps for RHEL 8.x:: + + sudo subscription-manager repos --enable "codeready-builder-for-rhel-8-x86_64-rpms" + sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + +Steps for RHEL 9.x:: + + sudo subscription-manager repos --enable "codeready-builder-for-rhel-9-x86_64-rpms" + sudo yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm +Steps for CENTOS 7.x:: + + yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + +Steps for CENTOS 8.x:: + + yum config-manager --set-enabled PowerTools + yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + yum config-manager --set-enabled AppStream + +Steps for AlmaLinux 8.x:: + + sudo dnf config-manager --set-enabled powertools + sudo dnf config-manager --set-enabled appstream + sudo dnf install epel-release + +Steps for AlmaLinux 9.x:: + + sudo dnf config-manager --set-enabled crb + sudo dnf install epel-release + + +Install XRT Software Stack +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +After XRT installation packages (DEB or RPM) are downloaded from Xilinx website or built from source, please install it with the following command + +Steps for RHEL/CentOS:: + + sudo yum install xrt_.rpm + +Steps for Ubuntu:: + + sudo apt install xrt_.deb + +Steps to reinstall XRT on RHEL/CentOS:: + + sudo yum reinstall ./xrt_.rpm + +Steps to reinstall XRT on Ubuntu:: + + sudo apt install --reinstall ./xrt_.deb + +.. Warning:: + + 1. If the XRT package is built locally, please make sure ERT firmware ``sched*.bin`` is built properly during build process and installed to ``/lib/firmware/xilinx`` after running the XRT installation command. + + 2. 
Secure boot enabled machines: Need to configure system to properly load DKMS modules. + Please follow method-1 from following page. You do not need to disable secure boot. First time DKMS compiles XRT (or any other third party) driver it will generate a MOK key that needs to be registered with BIOS. + + https://wiki.ubuntu.com/UEFI/SecureBoot/DKMS diff --git a/2024.2/html/_sources/m2m.rst.txt b/2024.2/html/_sources/m2m.rst.txt new file mode 100644 index 00000000000..d2de2da0faa --- /dev/null +++ b/2024.2/html/_sources/m2m.rst.txt @@ -0,0 +1,46 @@ +.. _m2m.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Memory-to-Memory (M2M) +********************** + +Some of the recent Alveo cards support direct Memory to Memory (M2M) data transfer whithin the card, improving the data transfer performance +as data does not need to be transferred via host while moving from one DDR bank to another. + +Example scenario +================ +Assume there are two kernels in the XCLBIN, K1 and K2. + +K1 interacts with DDR0 due to location proximity on the FPGA. + +Similarly, K2 interacts with DDR1 due to location proximity on the FPGA. + +The output of the K1 is consumed by K2 for its input. So there has to be data transfer from DDR bank0 to DDR bank1. + +.. image:: M2M-transfer.svg + :align: center + +In OpenCL host code, let's assume four buffers: Buf1, Buf2, Buf3, Buf4. + + +So the data transfer from the host follow the order + Buf1 -> K1 -> Buf2 -> Buf3 -> K2 -> Buf4 + +We can perform a copy from Buf2 to Buf3 using standard OpenCL API ``clEnqueueCopyBuffer``. See below a code segment. + +:: + + clEnqueueTask(queue, K1 ,0,nullptr,&events1); + clEnqueueCopyBuffer(queue, Buf2,Buf3,0,0,Buffer_size, 1, &event1, &event2); + clEnqueueTask(queue,K2,1,&event2,nullptr); + + +Some limitations +================ +1. Hardware supports M2M between two DDR banks, not other memory types such as HBM or PLRAM +2. The data being copied has to be 64 bit aligned. Otherwise, XRT will do copy via host using the same API +3. Not all the cards are M2M capable, in that case, XRT will do copy via host using the same API +4. M2M copy of OpenCL sub-buffers is not properly supported diff --git a/2024.2/html/_sources/mailbox.main.rst.txt b/2024.2/html/_sources/mailbox.main.rst.txt new file mode 100644 index 00000000000..f4ab37a5efa --- /dev/null +++ b/2024.2/html/_sources/mailbox.main.rst.txt @@ -0,0 +1,10 @@ +.. _mailbox.main.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Mailbox Subdevice Driver +------------------------ + +.. include:: ../core/mailbox.rst diff --git a/2024.2/html/_sources/mailbox.proto.rst.txt b/2024.2/html/_sources/mailbox.proto.rst.txt new file mode 100644 index 00000000000..778e8de4f9e --- /dev/null +++ b/2024.2/html/_sources/mailbox.proto.rst.txt @@ -0,0 +1,10 @@ +.. _mailbox.proto.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Mailbox Inter-domain Communication Protocol +------------------------------------------- + +.. include:: ../core/mailbox_proto.rst diff --git a/2024.2/html/_sources/mgmt-ioctl.main.rst.txt b/2024.2/html/_sources/mgmt-ioctl.main.rst.txt new file mode 100644 index 00000000000..5eec7d96ef2 --- /dev/null +++ b/2024.2/html/_sources/mgmt-ioctl.main.rst.txt @@ -0,0 +1,10 @@ +.. _mgmt-ioctl.main.rst: + +.. 
+ comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +XCLMGMT (PCIe Management Physical Function) Driver Interfaces +************************************************************* + +.. include:: ../core/mgmt-ioctl.rst diff --git a/2024.2/html/_sources/multiprocess.rst.txt b/2024.2/html/_sources/multiprocess.rst.txt new file mode 100644 index 00000000000..430399b83bb --- /dev/null +++ b/2024.2/html/_sources/multiprocess.rst.txt @@ -0,0 +1,58 @@ +.. _multiprocess.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Multi-Process Support +********************* + +Support for Multi-Process kernel execution is default in 2019.1 release. + +Requirements +============ + +Multiple processes can share access to the same device provided each +process uses the same ``xclbin``. Attempting to load different xclbins via +different processes concurrently will result in only one process being +successfull in loading its xclbin. The other processes will get error code +-EBUSY or -EPERM. + +Usage +===== + +If two or more processes execute the same kernel, then these processes +will acquire the kernel's compute units per the ``xocl`` kernel driver +compute unit scheduler, which is first-come first-serve. All +processes have the same priority in XRT. + +Known Issues +============ + +Debug and Profile will only be enable for the first process when multi-process +has been enabled. Emulation flow does not have support for multi-process yet. + + +Implementation Details For Curious +================================== + +Since 2018.3 downloading an xclbin to the device does not guarantee an automatic lock +on the device for the downloading process. Application is required to create explicit +context for each Compute Unit (CU) it wants to use. OCL applications automatically handle +context creation without user needing to change any code. XRT native applications +should create context on a CU with xclOpenContext() API which requires xclbin UUID +and CU index. This information can be obtained from the xclbin binary. xclOpenContext() +increments the xclbin UUID which prevents that xclbin from being unloaded. A corresponding +xclCloseContext() releases the reference count. xclbins can only be swapped if the reference +count is zero. If an application dies or exits without explicitly releasing the contexts it +had opened before the driver would automatically release the stale contexts. + +The following diagram shows a possibility with 7 processes concurrently using a device. The +processes in green are successful but processes in red fail at diffrent stages with appropriate +error codes. Processes P0, P1, P2, P3, P4 and P6 are each trying to use xclbin with UUID_X, +process P5 is attempting to use UUID_Y. Processes P0, P1, P3, P4, and P6 are trying to use CU_0 in +UUID_X. Process P2 is trying to use CU_1 in UUID_X and Process P5 is trying to use CU_0 in UUID_Y. +The diagram shows timeline view with all 7 processes running concurrently. + +.. graphviz:: multi.dot + :caption: Multi-process interaction diagram diff --git a/2024.2/html/_sources/nagios_plugin.rst.txt b/2024.2/html/_sources/nagios_plugin.rst.txt new file mode 100644 index 00000000000..4d4fa43fc3d --- /dev/null +++ b/2024.2/html/_sources/nagios_plugin.rst.txt @@ -0,0 +1,34 @@ +.. _nagios_plugin.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2022 Advanced Micro Devices, Inc. 
All rights reserved. + + +xrt nagios plugin +====== + +The ``xrt nagios plugin`` tool is a Nagios plugin developed to work with a Nagios infrastructure monitoring system. + +For more information on Nagios click `here `_. + +The plugin places the requested device report text in a JSON format into the standard output. + + +**Options**: These are the options can be used. + + - The ``--device`` (or ``-d``) specifies the target device to query for data + + - : The Bus:Device.Function of the device of interest + +- The ``--report`` (or ``-r``) switch can be used to view specific report(s) of interest from the following options + + - See :ref:`xbutil examine report options ` for a list of valid reports + +**Example commands** + + +.. code-block:: shell + + ./nagios_plugin.sh --device 0000:b3:00.1 --report platform + ./nagios_plugin.sh -d b3:00 -r platform diff --git a/2024.2/html/_sources/newxsa-bringup.rst.txt b/2024.2/html/_sources/newxsa-bringup.rst.txt new file mode 100644 index 00000000000..d88a800e997 --- /dev/null +++ b/2024.2/html/_sources/newxsa-bringup.rst.txt @@ -0,0 +1,91 @@ +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +New XSA Bringup +--------------- + +To fullfill the different requirements, new XSAs are invented based on existing Vitis XSAs. The first thing to verify the new XSA with XRT is to attach XRT drivers with it and see how it works. Then it might need to customizing and making changes in XRT to support the new XSA. + +This section focuses on how to modify XRT drivers to identify the new XSA and how to customize XRT drivers for new XSA. + +Assumption +~~~~~~~~~~ + +1. The firmware(.dsabin) file along with the new XSA is installed to the host properly. This firmware file could contain FPGA flash image, XMC image, microblaze image etc. which is consumed by driver and tools. + +2. The new XSA bitstream is programed to the FPGA board properly. + +Identify XSA +~~~~~~~~~~~~ + +There are two factor could be used to identify the new XSA. The first is PCI vendor-id, device-id and subsystem-id combination. The second is XSA VBNV name. For XRT driver ``src/runtime_src/core/pcie/driver/linux/xocl/devices.h`` need to be changed to identify the new XSA. + +Add new PCI ID combination +.......................... +Open devices.h and search + ``XOCL_MGMT_PCI_IDS`` for management PCI function + + ``XOCL_USER_XDMA_PCI_IDS`` for XDMA user PCI function + + ``XOCL_USER_QDMA_PCI_IDS`` for QDMA user PCI function + + +Then add entries corresponding for the new XSA. The PCI ID combination has to be unique. ``PCI_ANY_ID`` is acceptable wildcast. The behavior of overlapped combination is undefined. + +:: + + { XOCL_PCI_DEVID(0x10EE, 0x4B88, 0x4351, USER_XDMA) }, + { XOCL_PCI_DEVID(0x10EE, 0x6850, PCI_ANY_ID, USER_XDMA) }, + +The "USER_XDMA" in above example is XSA profile macro which describes the IPs implemented by the XSA. + +Add new VBNA +............ +In some cases, two different XSAs use the same PCI ID combination. VBNV is used to identify them. Macro ``XOCL_DSA_VBNV_MAP`` in devices.h is used to combine the VBNV name with XSA profile macro. + +:: + + { 0x10EE, 0x5001, PCI_ANY_ID, "xilinx_u200_xdma_201820_1", &XOCL_BOARD_USER_XDMA }, + +Above example specifies xdma XSA profile for XSA which has VBNV "xilinx_u200_xdma_201820_1". This is going to overwrite the XSA profile combination defined in PCI ID table. + +Customize XSA +~~~~~~~~~~~~~ + +Each XSA is described by a XSA profile macro in devices.h. 
This macro defines all the required information of XSA, include IP implemented in XSA, IO address and IRQ ranges for each IP, flags etc. The easiest way for new XSA is inheriting from an existing profile and customizing it. + +Here are the supported IPs. And IO address and IRQ ranged are pre-defined in devices.h. If the new XSA introduces new IP or changes the IO address or IRQ, it has to add or modify the corresponding macro. For new IP, XRT is going to create a sub device node (platform devcie) and the driver of the new IP has to developed. + + feature ROM (FEATURE_ROM), + + Memmory Mapped DMA (MM_DMA), + + Embedded Runtime Scheduler(MB_SCHEDULER), + + Xilinx Virtual Cable (XVC_PUB, XVC_PRI), + + System Monitor (SYSMON), + + Axi Firewall (AF), + + Memory Interface Generator (MIG), + + Microblaze (MB), + + Xilinx I2C (XIIC), + + mailbox (MAILBOX), + + Internal Configuration Access Port (ICAP), + + Streaming DMA (STR_DMA), + + XMC (XMC), + + DNA (DNA) + +Debug +~~~~~ + +Using lspci to check if the driver is loaded successfully. And please check dmesg output if the driver is not loaded. diff --git a/2024.2/html/_sources/opencl_extension.rst.txt b/2024.2/html/_sources/opencl_extension.rst.txt new file mode 100644 index 00000000000..fe48256da13 --- /dev/null +++ b/2024.2/html/_sources/opencl_extension.rst.txt @@ -0,0 +1,195 @@ +.. _opencl_extension.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +Xilinx OpenCL extension +*********************** + +Please follow the general OpenCL guidelines to compile host applications that uses XRT OpenCL API with Xilinx OpenCL extensions. +Xilinx OpenCL extension doesn't require additional compiler features. Normally C99 or C++11 would be good. + +All the OpenCL extensions are described in the file ``src/include/1_2/CL/cl_ext_xilinx.h`` + + +OpenCL Buffer extension by ``CL_MEM_EXT_PTR_XILINX`` +===================================================== + +XRT OpenCL implementation provides a mechanism using a structure ``cl_mem_ext_ptr_t`` to specify the special buffer and/or buffer location on the device. Ensure to use ``CL_MEM_EXT_PTR_XILINX`` flag when using this mechanism. Some usecases are as below: + +Specify a special buffer such as P2P buffer, Host only buffer, etc. +------------------------------------------------------------------- + +An example of a P2P Buffer specification + +.. code:: c++ + + cl_mem_ext_ptr_t p2p_buf_ext = {0}; + p2p_buf_ext.flags = XCL_MEM_EXT_P2P_BUFFER; + + cl_mem p2p_buf = clCreateBuffer(context, CL_MEM_READ_ONLY |CL_MEM_EXT_PTR_XILINX, buffersize, &p2p_buf_ext, &err); + +An example of a host only buffer specification + +.. code:: c++ + + cl_mem_ext_ptr_t host_buffer_ext = {0}; + host_buffer_ext.flags = XCL_MEM_EXT_HOST_ONLY; + + cl::Buffer host_buffer (context,CL_MEM_READ_ONLY |CL_MEM_EXT_PTR_XILINX, size, &host_buffer_ext); + +Specify regular buffer location on the device memory banks +---------------------------------------------------------- + +Optionally, for regular buffer, the buffer location can be specified using ``CL_MEM_EXT_PTR_XILINX`` and ``cl_mem_ext_ptr_t``. These can be done by any of these three methods + + 1. Specify a Buffer location by DDR bank name (legacy) + 2. Specify a Buffer location by memory bank index + 3. Specify a Buffer location by kernel name and argument index + + +1. 
Specify a Buffer location by DDR bank name (legacy) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here is an example of specifying explicit name of the bank, this works for only DDR banks. The supported flags are ``XCL_MEM_DDR_BANK0``, ``XCL_MEM_DDR_BANK1``, ``XCL_MEM_DDR_BANK2``, and ``XCL_MEM_DDR_BANK3``. + +.. code:: c++ + + cl_mem_ext_ptr_t ext = {0}; + ext.banks = XCL_MEM_DDR_BANK0; + cl_int error; + clCreateBuffer(context,CL_MEM_EXT_PTR_XILINX,size,&ext,&error); + + +- **Note**: Explicit bank specification is not required in most of the host code development. The XRT can obtain the bank location for the buffer if the buffer is used for setting the kernel arguments right after the buffer creation, i.e. before any enqueue operation on the buffer. + + +2. Specify a Buffer location by memory bank index +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This approach works for all types of memory banks as it accept index of the memory. The memory index can be obtained by inspecting .xclbin.info file or by xbutil query. + +.. code:: c++ + + // Check the memory index from xclbin.info file   + ext.banks = 2 | XCL_MEM_TOPOLOGY; + cl_int error; + clCreateBuffer(context,CL_MEM_EXT_PTR_XILINX,size,&ext,&error) + +- **Note**: Explicit bank specification is not required in most of the host code development. The XRT can obtain the bank location for the buffer if the buffer is used for setting the kernel arguments right after the buffer creation, i.e. before any enqueue operation on the buffer. + + +3. Specify a Buffer location by kernel name and argument index +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In relative specification style, the memory bank is detected from the kernel arguments, for this purpose the kernel handle and argument index is used as below + +.. code:: c++ + + ext.argidx = idx; + ext.kernel = kernel; + cl_int error; + clCreateBuffer(context,CL_MEM_EXT_PTR_XILINX,size,&ext,&error); + + + +- **Note**: This style of relative bank specification is not required in most of the host code development. The XRT can obtain the bank location for the buffer if the buffer is used for setting the kernel arguments right after the buffer creation, i.e. before any enqueue operation on the buffer. + + +DMA-BUF APIs +============ + +For some use-cases, for example p2p, multiprocess it may required to use buffer sharing. The XRT provides a couple of related APIs for import/export FD from the OpenCL buffer object. + + - ``xclGetMemObjectFd`` : To obtain FD from OpenCL memory object + - ``xclGetMemObjectFromFd``: To obtain OpenCL memory object from FD + +The example of API usage in the p2p context can be found in OpenCL example code in P2P documentation :doc:`p2p` + + + + +Miscellaneous other APIs and Parameter extension +================================================ + +API to get Compute Units Information +------------------------------------ + +The API ``xclGetComputeUnitInfo`` is used to get information of Compute Unit. The API should be used together with specific flags to obtain the related information + + - ``XCL_COMPUTE_UNIT_NAME`` + - ``XCL_COMPUTE_UNIT_INDEX`` + - ``XCL_COMPUTE_UNIT_BASE_ADDRESS`` + - ``XCL_COMPUTE_UNIT_CONNECTIONS`` + +Example to get CU index and CU base address + +.. 
code:: c++ + + cl_uint cuidx; // retrieve index of first cu in kernel + xclGetComputeUnitInfo(kernel,0,XCL_COMPUTE_UNIT_INDEX,sizeof(cuidx),&cuidx,nullptr); + + size_t cuaddr; + xclGetComputeUnitInfo(kernel,0,XCL_COMPUTE_UNIT_BASE_ADDRESS,sizeof(cuaddr),&cuaddr,nullptr); + + +Parameter extension of the API ``clGetKernelInfo`` +-------------------------------------------------- + +These XRT specific parameters are provided for ``cl_kernel_info`` to be used with API ``clGetKernelInfo``. + + - ``CL_KERNEL_COMPUTE_UNIT_COUNT``: Can be used to get the number of CUs from the kernel handle/object + - ``CL_KERNEL_INSTANCE_BASE_ADDRESS``: The base address of this kernel object + +The below example is showing to get the number of Compute Unit information from the kernel object + +.. code:: c++ + + cl_uint numcus = 0; + clGetKernelInfo(kernel,CL_KERNEL_COMPUTE_UNIT_COUNT,sizeof(cl_uint),&numcus,nullptr); + + +Parameter extension of the API ``clGetKernelArgInfo`` +----------------------------------------------------- + +This XRT specific parameter is provided for ``cl_kernel_arg_info`` to be used with API ``clGetKernelArgInfo``. + + - ``CL_KERNAL_ARG_OFFSET``: To get the argument offset for a specific argument. + +Example shows below to get the offset for the argument 2 for the kernel. + +.. code:: c++ + + size_t foo_offset = 0; + clGetKernelArgInfo(kernel, 2, CL_KERNEL_ARG_OFFSET, sizeof(foo_offset), &foo_offset, nullptr); + + +Parameter extension of the API ``clGetMemObjectInfo`` +----------------------------------------------------- + +This XRT specific parameter is provided for ``cl_mem_info`` to be used with API ``clGetMemObjectInfo``. + + - ``CL_MEM_BANK``: Memory bank index associated with the OpenCL Buffer + +Example shows below to get the offset for the argument 2 for the kernel. + +.. code:: c++ + + int mem_bank_index = 0; + clGetMemObjectInfo(buf, CL_MEM_BANK, sizeof(int), &mem_bank_index, nullptr); + + +Parameter extension of the API ``clGetDeviceInfo`` +-------------------------------------------------- + +This XRT specific parameter is provided for ``cl_device_info`` to be used with API ``clGetDeviceInfo``. + + - ``CL_DEVICE_PCIE_BDF``: To obtain the Bus/Device/Function information of the Pcie based Device + +Example shows below to get PCie BDF information from the OpenCL device + +.. code:: c++ + + char[20] bdf; + clGetDeviceInfo(device, CL_DEVICE_PCIE_BDF, sizeof(bdf), &bdf, nullptr); diff --git a/2024.2/html/_sources/p2p.rst.txt b/2024.2/html/_sources/p2p.rst.txt new file mode 100644 index 00000000000..ee5ac6f2d55 --- /dev/null +++ b/2024.2/html/_sources/p2p.rst.txt @@ -0,0 +1,249 @@ +.. _p2p.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + + +PCIe Peer-to-Peer (P2P) +*********************** + +PCIe peer-to-peer communication (P2P) is a PCIe feature which enables two PCIe devices to directly transfer data between each other without using host RAM as a temporary storage. The latest version of Alveo PCIe platforms support P2P feature via PCIe Resizeable BAR Capability. + +1. Data can be directly transferred between the DDR/HBM of one Alveo PCIe device and DDR/HBM of a second Alveo PCIe device. +2. A thirdparty peer device like NVMe can directly read/write data from/to DDR/HBM of Alveo PCIe device. + +.. figure:: PCIe-P2P.svg + :figclass: align-center + + PCIe peer-to-peer topology and data transfer + +To use P2P, the DDR/HBM on a Alveo PCIe platform need to be mapped to host IO memory space. 
The total size of DDR/HBM on most Alveo PCIe platforms is 64 GB all of which needs to mapped to the host IO memory space. Partial mapping a smaller range of device DDR is not supported in this release of XRT. Considering not all host systems (CPU/BIOS/chipset) support 64 GB IO memory space, P2P feature is off by default after a cold reboot or power cycle. The feature needs to be explicitly enabled after a cold boot. + +Note that in addition to BIOS, host CPU should be capable of supporting a very large physical address space. Most desktop class processors do not support very large address space required for supporting 64 GB BAR together with host RAM and address space of all peripherals. + +BIOS Setup +~~~~~~~~~~ + +1. Before turning on P2P, please make sure 64-bit IO is enabled and the maximium host supported IO memory space is greater than total size of DDRs on Alveo PCIe platform in host BIOS setup. + +2. Enable large BAR support in BIOS. This is variously called as *Above 4G decoding*, *PCI 64-bit resource handing above 4G* or *Memory mapped I/O above 4GB* and may be found under PCIe configuration or Boot configuration. + + +Note +....... +It may be necessary to update to the latest BIOS release before enabling P2P. Not doing so may cause the system to continuously reboot during the boot process. If this occurs, power-cycle the system to disable P2P and allow the system to boot normally. + + +Warning +....... + +Mother board vendors have different implementations of large PCIe BAR support in BIOS. If the host system does not support large IO memory well or if host Linux kernel does not support this feature, the host could stop responding after P2P is enabled. Please note that in some cases a warm reboot may not recover the system. Power cycle is required to recover the system in this scenario. As previosuly noted Alveo PCIe platforms turn off P2P after a power cycle. + +Some Mother board BIOS setup allows administrator to set IO Memory base address and some do not. Having large IO Memory base could possibly cause OS kernel crash during warm reboot. Warm reboot crash has been observed on Ubuntu running with kernel 4.15 plus IO memory base been set to 56T in BIOS. To avoid this crash, setting IO memory base to 12T in BIOS is recommended. Per our test, the highest P2P BAR physical address has to be less than 32T. And not all Linux kernels have this issue. + +Enable/Disable P2P +~~~~~~~~~~~~~~~~~~ + +XRT ``xbutil`` is used to enable/disable P2P feature and check current configuration. P2P configuration is persistent across warm reboot. Enabling or disabling P2P requires root privilege. + +Enabling P2P after cold boot is likly to fail because it resizes an exisitng P2P PCIe BAR to a large size and usually Linux will not reserve large IO memory for the PCIe bridges. XRT driver checks the maximum IO memory allowed by host BIOS setup and returns error if there is not enough IO memory for P2P. A warm reboot is required in this scenario after which BIOS and Linux will reassign the required expanded IO memory resource for P2P BAR. +If a system stops responding after enabling P2P and warm reboot does not recover the host then power cycle is required to recover the host. + +Disabling P2P takes effect immediately. Currently XRT does not check if the P2P memory is in use. Administrator needs to make sure P2P is not in use before disabling it. The result of disabling P2P while it is in use is undefined. + +The IO memory region will not be completely released after disabling P2P. 
Thus, re-enabling P2P does not need reboot. + +Current P2P Configuration +......................... + +``P2P Enabled`` is shown within ``xbutil examine`` output as below. + +:: + + # xbutil examine --device 0000:03:00.1 + + . . . + P2P Status : disabled + +There are three possible values for ``P2P Status`` field above. + +============ ========================================================= +Value Remarks +============ ========================================================= +``enabled`` P2P is enabled +``disabled`` P2P is disabled +``no iomem`` P2P is enabled in device but system could not allocate IO + memory, warm reboot is needed +============ ========================================================= + +Enable P2P +.......... + +The command for enabling p2p is as below + +:: + + # sudo xbutil configure --device 0000:b3:00.1 --p2p enable + + +When trying to enable p2p, it is possible that the Pcie Bar increase cannot happen without an warm reboot. In those situation when trying to enable the P2P, you will see a message for warm reboot request. You can also verify this through ``xbutil examine`` that would show P2P status is ``no iomem`` + + +Disable P2P +........... + +The commands for disabling p2p is as below + +:: + + # sudo xbutil configure --device 0000:b3:00.1 --p2p disable + + +Disabling and re-enabling P2P work without a warm reboot in-between. + + + +PCIe Topology Considerations +............................ + +For best performance peer devices wanting to exchange data should be under the same PCIe switch. + +If IOMMU is enabled then all peer-to-peer transfers are routed through the root complex which will degrade performance significantly. + +To measure peak P2P performance with two Alveo cards, it needs to use indentical configuration of both them. This means same type of Alveo and running same verson of shell. Also both card should be under the same PCIe switch. Second, it has been known that P2P read has better throughput comparing to P2P write. Thus, P2P read should be used in benchmark to get the peak performance. + + + +P2P Data Transfer between FPGA Cards +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +OpenCL coding style +................... + +Consider the example situation as below: + + - P2P data transfer from Card1 to Card2 + - Source buffer (`buf_src`) is OpenCL buffer resident of Card1's DDR + - Destination buffer (`buf_dst`) is OpenCL buffer resident of Card2's DDR + +Typical coding style: + + 1. In the OpenCL host code, create separate `cl_context` for each `cl_device_id` + 2. Define `buf_src` as regular buffer + 3. Define `buf_dst` as P2P buffer + 4. Import the P2P buffer or `buf_dst` to the context of `buf_src`. Use the following APIs + + - `xclGetMemObjectFd` + - `xclGetMemObjectFromFd` + 5. Perform the copy operation from `buf_src` to `imported_dst_buf` + +.. code-block:: cpp + + // Source Buffer (regular) in source context + cl_mem src_buf; + src_buf = clCreateBuffer(src_context, CL_MEM_WRITE_ONLY, buffersize, NULL, &err); + clSetKernelArg(kernel_1, 0, sizeof(cl_mem), &src_buf); + + // Note: Handling of err is not shown throughout the code example. 
+ + +Profile Report +.............. + +In the Profile Summary report file, the P2P transfer is shown under **Data Transfer: DMA Bypass**. + +**Data Transfer: DMA Bypass** + ++-------+----------------+-----------+------------+-----------+----------+----------+-------------+ +| Device| Transfer Type | Number of | Transfer | Total Data| Total | Average | Average | +| | | Transfer | Rate(MB/s)| Transfer | Time (ms)| Size (Kb)| Latency(ns) | ++=======+================+===========+============+===========+==========+==========+=============+ +| ... | IN | 4096 | N/A | 0.262 | N/A | 0.064 | N/A | ++-------+----------------+-----------+------------+-----------+----------+----------+-------------+ + +The report shows the P2P transfer corresponding to the receiving device (i.e. transfer type IN). + + +P2P Data Transfer between FPGA Card and NVMe Device +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Using a P2P enabled device, data can be transferred between the FPGA device and another NVMe device, such as a SMART SSD, without migrating the data via the host memory space. + +OpenCL coding style +................... + +Typical coding style: + + 1. Create a P2P buffer + 2. Map the P2P buffer to the host space + 3. Access the SSD location through the Linux file system; the file needs to be opened with `O_DIRECT` + 4. Read/Write through the Linux `pread`/`pwrite` functions + +.. code-block:: cpp + + // Creating P2P buffer + cl_mem p2pBO; + cl_mem_ext_ptr_t p2pBOExt = {0}; + + p2pBOExt.flags = XCL_MEM_EXT_P2P_BUFFER; + + p2pBO = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_EXT_PTR_XILINX, chunk_size, &p2pBOExt, NULL); + + clSetKernelArg(kernel, 0, sizeof(cl_mem), &p2pBO); + + // Map P2P Buffer into the host space + + p2pPtr = (char *) clEnqueueMapBuffer(command_queue, p2pBO, CL_TRUE, CL_MAP_WRITE | CL_MAP_READ, 0, chunk_size, 0, NULL, NULL, NULL); + + filename = + fd = open(filename, O_RDWR | O_DIRECT); + + // Read chunk_size bytes starting at offset 0 from fd into p2pPtr + pread(fd, p2pPtr, chunk_size, 0); + + // Write chunk_size bytes starting at offset 0 from p2pPtr into fd + pwrite(fd, p2pPtr, chunk_size, 0); + +Profile Report +.............. + +Sample Profile report from an FPGA to NVMe device transfer via P2P + +**Data Transfer: DMA Bypass** + ++------+----------------+----------+------------+------------+----------+----------+------------+ +|Device| Transfer Type | Number of| Transfer | Total Data | Total | Average | Average | +| | | Transfer | Rate(MB/s) | Transfer | Time (ms)| Size (Kb)| Latency(ns)| ++======+================+==========+============+============+==========+==========+============+ +| ...
| OUT | 8388608 | N/A | 1073.740 | N/A | 0.128 | 297.141 | ++------+----------------+----------+------------+------------+----------+----------+------------+ + +Sample Profile report from NVMe Device to FPGA transfer via P2P + +**Data Transfer: DMA Bypass** + ++------+----------------+----------+------------+------------+----------+----------+------------+ +|Device| Transfer Type | Number of| Transfer | Total Data | Total | Average | Average | +| | | Transfer | Rate(MB/s) | Transfer | Time (ms)| Size (Kb)| Latency(ns)| ++======+================+==========+============+============+==========+==========+============+ +| ... | IN | 4194304 | N/A | 1073.740 | N/A | 0.256 | 237.344 | ++------+----------------+----------+------------+------------+----------+----------+------------+ diff --git a/2024.2/html/_sources/platforms.rst.txt b/2024.2/html/_sources/platforms.rst.txt new file mode 100644 index 00000000000..5eb7af4f15e --- /dev/null +++ b/2024.2/html/_sources/platforms.rst.txt @@ -0,0 +1,155 @@ +.. _platforms.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +================================= + XRT and Vitis™ Platform Overview +================================= + +`Xilinx Runtime library (XRT) `_ is an open-source easy to use software stack that facilitates management and usage of FPGA/ACAP devices. Users use familiar programming languages like C/C++ or Python to write host code which uses XRT to interact with FPGA/ACAP device. XRT exports well defined set of software APIs that work across PCIe based datacenter platforms and ZYNQ UltraScale+ MPSoC/Versal ACAP based embedded platforms. XRT is key component of `Vitis™ `_ and `Alveo™ `_ solutions. + +User Application Compilation +============================ + +User application is made up of host code written in C/C++/OpenCL or Python. Device code may be written in C/C++/OpenCL or VHDL/Verilog hardware description language. + +.. figure:: Alveo-Compilation-Flow.svg + :figclass: align-center + + User application compilation and execution + +Users use Vitis™ compiler, v++ to compile and link device code for the target platform. Host code written in C/C++/OpenCL may be compiled with gcc/g++. Host code may also be written in Python OpenCL (using PyOpenCL) or Python XRT (using built-in python binding). + +PCIe Based Platforms +==================== + +.. figure:: XRT-Architecture-PCIe.svg + :figclass: align-center + + Alveo PCIe stack + +XRT supports following PCIe based devices: + +1. U200 +2. U250 +3. U280 +4. U50 +5. AWS F1 +6. U30 +7. U25 +8. VCK5000 +9. Advantech VEGA-4000/4002 + +PCIe based platforms are supported on x86_64, PPC64LE and AARCH64 host architectures. The +platform is comprised of physical partitions called *Shell* and *User*. The Shell has two physical +functions: privileged PF0 also called *mgmt pf* and non-privileged PF1 also called *user pf*. Shell +provides basic infrastructure for the Alveo platform. User partition (otherwise known as PR-Region) +contains user compiled binary. XRT uses *Dynamic Function Exchange (DFX)* to load user compiled +binary to the User partition. + +MGMT PF (PF0) +------------- + +XRT Linux kernel driver *xclmgmt* binds to management physical function. Management physical function +provides access to Shell components responsible for **privileged** operations. 
xclmgmt driver is organized +into subdevices and handles the following functionality: + +* User compiled FPGA image (xclbin) download, which involves ICAP (bitstream download) programming, clock + scaling and isolation logic management. +* Loading the firmware container called xsabin, which contains the PLP (for 2 RP platforms) and firmware for the + embedded MicroBlazes. The embedded MicroBlazes perform the functionality of ERT and CMC. +* Access to in-band sensors: temperature, voltage, current, power, fan RPM etc. +* AXI Firewall management in data and control paths. AXI firewalls protect the shell and PCIe from the untrusted user partition. +* Shell upgrade by programming the QSPI flash controller. +* Device reset and recovery upon detecting AXI firewall trips or an explicit request from the end user. +* Communication with the user pf driver xocl via hardware mailbox. The protocol is defined in :doc:`mailbox.proto` +* Interrupt handling for the AXI Firewall and Mailbox HW IPs. +* Device DNA (unique ID) discovery and validation. +* DDR and HBM memory ECC handling and reporting. + +USER PF (PF1) +------------- + +XRT Linux kernel driver *xocl* binds to the user physical function. The user physical function provides access +to Shell components responsible for **non privileged** operations. It also provides access to compute units +in the user partition. The xocl driver is organized into subdevices and handles the following functionality, which +is exercised using well-defined APIs in the ``xrt.h`` header file. + +* Device memory topology discovery and device memory management. The driver provides a well-defined abstraction + of buffer objects to the clients. +* XDMA/QDMA memory mapped PCIe DMA engine programming with an easy-to-use buffer migration API. +* Multi-process aware context management with concurrent access to the device by multiple processes. +* Compute unit execution pipeline management with the help of the hardware scheduler ERT. If ERT is not available + then scheduling is completely handled by the xocl driver in software. +* Interrupt handling for PCIe DMA, compute unit completion and Mailbox messages. +* Setting up of Address-remapper tables for direct access to host memory by kernels compiled into the user partition. Direct + access to host memory is enabled by the Slave Bridge (SB) in the shell. +* Buffer import and export via the Linux DMA-BUF infrastructure. +* PCIe peer-to-peer buffer mapping and sharing over the PCIe bus. +* Secure communication infrastructure for exchanging messages with the xclmgmt driver. +* Memory-to-memory (M2M) programming for moving data between device DDR, PL-RAM and HBM. + + +.. note:: + Section :doc:`security` describes PCIe platform security and robustness in detail. + + +PCIe Based Hybrid Platforms +--------------------------- + +.. figure:: XRT-Architecture-Hybrid.svg + :figclass: align-center + + Alveo PCIe hybrid stack + +U30 and VCK5000, which are MPSoC and Versal based platforms respectively, are considered hybrid devices. They have a hardened PS +subsystem with ARM APUs in the Shell. The PL fabric is exposed as the user partition. The devices act as PCIe endpoints +to PCIe hosts like x86_64 and PPC64LE. They have a two physical function architecture identical to other Alveo platforms. +On these platforms the ERT subsystem runs on the APU. + + +Zynq-7000 and ZYNQ Ultrascale+ MPSoC Based Embedded Platforms +============================================================= + +.. figure:: XRT-Architecture-Edge.svg + :figclass: align-center + + MPSoC Embedded stack + +..
figure:: XRT-Architecture-Versal-Edge.svg + :figclass: align-center + + Versal ACAP Embedded stack + +XRT supports ZYNQ-7000, ZYNQ Ultrascale+ MPSoC and Versal ACAP. User can create their own embedded platforms +and enable XRT with the steps described :doc:`yocto`. + +`Source code `_ and +`pre-built `_ +embedded platforms for the following Xilinx evaluation boards are provided: + +1. ZC706 +2. ZCU102 +3. ZCU104 +4. ZCU106 +5. VCK190 + +MPSoC and Versal based platforms are supported with PetaLinux based common root filesystem and common +kernel. XRT Linux kernel driver *zocl* does the heavy lifting for the embedded platform. It handles the +following functionality with well defined APIs in ``xrt.h`` and ``xrt_aie.h`` (for AIE) header files. + +* PS memory CMA buffer management and cache management. On SVM enabled platforms zocl also manages SMMU. The driver provides + well-defined abstraction of buffer objects to the clients. +* Compute unit execution pipeline management for clients. +* User compiled FPGA image (xclbin) for platforms with Partial Reconfiguration support. +* Buffer object import and export via DMA-BUF. +* Interrupt handling for compute unit completion. +* AIE array programming and graph execution. +* If PL-DDR memory is enabled by instantiating MIG in PL, zocl provides memory management similar to PS memory. +* ZynqMP DMA engine programming for moving data between PS DDR and PL-DDR. +* AIE GMIO data mover programming to move data between NOC and AIE. + +.. note:: + Section :doc:`execution-model` provides a high level overview of execution model. diff --git a/2024.2/html/_sources/platforms_partitions.rst.txt b/2024.2/html/_sources/platforms_partitions.rst.txt new file mode 100644 index 00000000000..953dff8d71e --- /dev/null +++ b/2024.2/html/_sources/platforms_partitions.rst.txt @@ -0,0 +1,73 @@ +.. _platform_partitions.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +================================= + Alveo™ Platform Loading Overview +================================= + +Alveo platforms are architected as two physical FPGA partitions: *Shell* and *User*. Shell provides basic infrastructure +for the platform like PCIe connectivity, board management, DFX support, sensors, clocking, reset, etc. :doc:`security` +enumerates the shell functionality in detail. User partition contains user compiled binary called xclbin which is loaded by XRT using +DFX technology. + +There is a variation of Alveo platform called two stage platform here where the Shell is further split into two partitions: a +thin partition called *Base* and second partition called *Shell*. + +Physical partitions require strict HW compatibility with each other for DFX to work properly. Hence every physical partition +has two interface UUIDs: *parent* UUID and *child* UUID. For simple single stage platforms Shell → User forms parent child +relationship. For complex two stage platforms Base → Shell → User forms the parent child relationship chain. + +.. note:: + Partition compatibility matching is key design component of Alveo platforms and XRT. Partitions have child and parent relationship. A loaded partition exposes child partition UUID to advertise its compatibility requirement for child partition. When loading a child partition the xclmgmt management driver matches parent UUID of the child partition against child UUID exported by the parent. 
Parent and child partition UUIDs are stored in the xclbin (for user) or xsabin (for base and shell). + +Single Stage Platforms +====================== + +As mentioned before, simple platforms have two partitions: Shell and User. + +Shell +----- + +.. figure:: XSA-shell-partitions-1RP.svg + :figclass: align-center + + Alveo shell partitions and loading for simple platform + +The Shell partition is loaded from flash at system boot time. It establishes the PCIe link and exposes two physical functions to the BIOS. After OS boot, the xclmgmt driver attaches to PCIe physical function 0 exposed by the Shell and then looks for the VSEC in the PCIe extended configuration space. Using the VSEC it determines the logic UUID of the Shell and uses the UUID to load the matching *xsabin* file from the Linux firmware directory. The xsabin file contains metadata to discover the peripherals that are part of the Shell and firmware(s) for any embedded processors in the Shell. + +When the xocl driver comes online it requests the Shell metadata via the ``XCL_MAILBOX_REQ_PEER_DATA`` opcode over the mailbox. The xclmgmt management driver responds by sending relevant information about shell components, such as XDMA or the Address Re-mapper, that should be managed by the xocl driver. + +The Shell exports a child interface UUID which is used for the compatibility check when loading the user compiled User partition as part of DFX. The xclmgmt management driver reads the parent interface UUID stored in the User xclbin and matches it with the child interface UUID exported by the Shell to determine if the User xclbin is compatible with the Shell. If the match fails, loading of the xclbin is denied. + +Two Stage Platforms +=================== + +As mentioned before, complex platforms have three partitions: Base, Shell and User. + +Base +---- + +The Base partition is loaded from flash at system boot time. It establishes the PCIe link and exposes two physical functions to the BIOS. After OS boot, the xclmgmt driver attaches to physical function 0 exposed by Base and then looks for the VSEC in the PCIe extended configuration space. Using the VSEC it determines the logic UUID of Base and uses the UUID to load the matching *xsabin* from the Linux firmware directory. The xsabin contains metadata to discover the peripherals that are part of Base and firmware(s) for any embedded processors in Base. Base only contains essential peripherals such as ICAP, the QSPI controller, AXI Firewalls etc. + +Base exports a child interface UUID which is used for the compatibility check when loading the vendor provided Shell partition as part of DFX. The xclmgmt management driver reads the parent interface UUID stored in the Shell xsabin and matches it with the child interface UUID exported by Base to determine if the Shell xsabin is compatible with the Base. If the match fails, loading of the xsabin is denied. + +Shell +----- + +.. figure:: XSA-shell-partitions-2RP.svg + :figclass: align-center + + Alveo shell partitions and loading for 2RP platform + +In two stage platforms, the Shell partition contains additional shell components such as the PCIe DMA engine, AXI Firewalls, Address Re-mapper, ERT, etc. The Shell partition is explicitly loaded by the system administrator using the ``xbmgmt program --shell `` command. After the Shell partition is loaded, the xclmgmt driver posts a ``XCL_MAILBOX_REQ_MGMT_STATE`` message to the xocl driver via the mailbox to indicate that the shell has changed. The xocl driver then requests metadata via the ``XCL_MAILBOX_REQ_PEER_DATA`` opcode. The xclmgmt management driver responds by sending relevant information about shell components such as XDMA that should be managed by the xocl driver.
A system administrator can pre-load a Shell based on the workload the system is being provisioned for. A new Shell xsabin load clobbers the previous Shell and User images. + +User +==== + +The User partition contains user compiled components like acceleration engines/compute kernels etc. It is loaded by the xclmgmt driver on request by the user. XRT provides the API ``xclLoadXclBin()`` and the command line utility ``xbutil program`` to help load xclbins. This allows users to dynamically swap the image running on the User partition to meet their workload requirements. + +.. note:: + Refer to :doc:`mailbox.main` for the detailed protocol used by the xocl and xclmgmt drivers for loading the Shell xsabin and User xclbin. diff --git a/2024.2/html/_sources/pyxrt.rst.txt b/2024.2/html/_sources/pyxrt.rst.txt new file mode 100644 index 00000000000..688cc7f833c --- /dev/null +++ b/2024.2/html/_sources/pyxrt.rst.txt @@ -0,0 +1,6 @@ + +XRT Python Bindings +------------------- + +.. automodule:: pyxrt + :members: diff --git a/2024.2/html/_sources/security.rst.txt b/2024.2/html/_sources/security.rst.txt new file mode 100644 index 00000000000..df97c055b01 --- /dev/null +++ b/2024.2/html/_sources/security.rst.txt @@ -0,0 +1,360 @@ +.. _security.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + + +Security of Alveo Platform +************************** + +.. figure:: XSA-shell.svg + :figclass: align-center + + Alveo shell mgmt and user components, data and control paths + +Security is built into the Alveo platform hardware and software architecture. The platform +is made up of two physical partitions: an immutable Shell and a user compiled User partition. +This design allows end users to perform Dynamic Function eXchange (Partial Reconfiguration +in classic FPGA terminology) in the well defined User partition while the static Shell +provides key infrastructure services. Alveo shells assume the PCIe host (with access to PF0) is +part of the *Root-of-Trust*. The following features reinforce the security of the platform: + +1. Two physical function shell design +2. Clearly classified trusted vs untrusted shell peripherals +3. Signing of xclbins +4. AXI Firewall +5. Well-defined compute kernel execution model +6. No direct access to PCIe TLP from User partition +7. Treating User partition as untrusted partition + +Shell +===== + +The Shell provides core infrastructure to the Alveo platform. It includes a *hardened* PCIe +block which provides physical connectivity to the host PCIe bus via two physical functions +as described in :doc:`platforms`. +The Shell is the *trusted* partition of the Alveo platform and for all practical purposes +should be treated like an ASIC. During system boot, the shell is loaded from the PROM. +Once loaded, the Shell cannot be changed. + +In the figure above, the Shell peripherals shaded blue can only be accessed from *management* +physical function 0 (PF0) while those shaded violet can be accessed from *user* physical +function 1 (PF1). From a PCIe topology point of view, PF0 *owns* the device and performs +supervisory actions on the device. It is part of the *Root-of-Trust*. Peripherals shaded blue +are trusted while those shaded violet are not. Alveo shells use a specialized IP called +**PCIe Demux** which routes PCIe traffic destined for PF0 to the PF0 AXI network and traffic destined +for PF1 to the PF1 AXI network. It is responsible for the necessary isolation between PF0 and PF1.
+ +Trusted peripherals includes ICAP for bitstream download (DFX), CMC for sensors and thermal +management, Clock Wizards for clock scaling, QSPI Ctrl for PROM access (shell upgrades), DFX +Isolation, Firewall controls and ERT UART. + +All peripherals in the shell except XDMA/QDMA are slaves from PCIe point of view and cannot +initiate PCIe transactions. Alveo shells have one of XDMA or QDMA PCIe DMA engine. Both +`XDMA `_ and +`QDMA `_ +are regular PCIe scatter-gather DMA engine with a well defined programming model. + +The Shell provides a *control* path and a *data* +path to the user compiled image loaded on User partition. The Firewalls in control and data +paths protect the Shell from un-trusted User partition. For example if a slave in DFX has a +bug or is malicious the appropriate firewall will step in and protect the Shell from the +failing slave as soon as a non compliant AXI transaction is placed on AXI bus. + +Newer revisions of shell have a feature called :doc:`sb` which provides direct access to host +memory from kernels in the User partition. With this feature kernels can initiate PCIe burst +transfers from PF1 without direct access to PCIe bus. AXI Firewall (SI) in reverse direction protects +PCIe from non-compliant transfers. + +.. note:: + Features :doc:`sb` and :doc:`p2p` are not available in all shells. + + +For more information on firewall protection see `Firewall`_ section below. + +For shell update see `Shell Update`_ section below. + +Compatibility enforcement between Shell and User xclbin is described in :doc:`platform_partitions` + +PCIe Topology +------------- + +As mentioned before Alveo platforms have two physical function architecture where each function has its +own BARs. The table below gives overview of the topology and functionality. + +== === ======= =============================================================== +PF BAR Driver Purpose +== === ======= =============================================================== +0 0 xclmgmt Memory mapped access to privileged IPs in the shell as shown + in the Figure above. +0 2 xclmgmt Setup MSI-X vector table +1 0 xocl Access to register maps of user compiled compute units in the + DFX region +1 2 xocl Memory mapped access to XDMA/QDMA PCIe DMA engine programming + registers +1 4 xocl CPU direct and P2P access to device attached DDR/HBM/PL-RAM + memory. + By default its size is limited to 256MB but can be expanded + using XRT xbutil tool as described in :doc:`p2p` +== === ======= =============================================================== + +Sample output of Linux ``lspci`` command for U50 device below:: + + dx4300:~>lspci -vvv -d 10ee: + 02:00.0 Processing accelerators: Xilinx Corporation Device 5020 + Subsystem: Xilinx Corporation Device 000e + Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx+ + Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- SERR- + Kernel driver in use: xclmgmt + Kernel modules: xclmgmt + + 02:00.1 Processing accelerators: Xilinx Corporation Device 5021 + Subsystem: Xilinx Corporation Device 000e + Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx- + Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- SERR- + Kernel driver in use: xocl + Kernel modules: xocl + + dx4300:~> + + +Dynamic Function eXchange +========================= + +User compiled image packaged as xclbin is loaded on the Dynamic Functional eXchange +partition by the Shell. 
The image may be signed with a private key and its public +key registered with Linux kernel keyring. The xclbin signature is validated by +xclmgmt driver. This guarantees that only known good user compiled images are loaded by +the Shell. The image load is itself effected by xclmgmt driver which binds to PF0. +xclmgmt driver downloads the bitstream packaged in the bitstream section of xclbin by +programming the ICAP peripheral. The management driver also discovers the target frequency +of the User partition by reading the xclbin clock section and then programs the clocks +which are controlled from Shell. DFX is exposed as one atomic ioctl by xclmgmt driver. + +xclbin is a container which packs FPGA bitstream for the User partition and host of related +metadata like clock frequencies, information about instantiated compute units, etc. The +compute units typically expose a well defined register space on the PCIe BAR for access by +XRT. An user compiled image does not have any physical path to directly interact with PCIe +Bus. Compiled images do have access to device DDR. + +More information on xclbin can be found in :doc:`formats`. + +Xclbin Generation +================= + +Users compile their Verilog/VHDL/OpenCL/C/C++ design using Vitis™ compiler, v++ which also takes +the shell specification as a second input. By construction the Vitis™ compiler, v++ generates image +compatible with User partition of the shell. The compiler uses a technology called *PR Verify* +to ensure that the user design physically confines itself to User partition and does not attempt +to overwrite portions of the Shell. It also validates that all the IOs between the DFX and +Shell are going through fixed pins exposed by Shell. + +Signing of Xclbins +================== + +xclbin signing process is similar to signing of Linux kernel modules. xclbins can be signed by +XRT **xclbinutil** utility. The signing adds a PKCS7 signature at the end of xclbin. The signing +certificate is then registered with appropriate key-ring. When Linux is running in UEFI secure +mode, signature verification is enforced using signing certification in *system* key-ring (when +Linux is not running in secure mode there is no such verification). + + +Firewall +======== + +Alveo hardware design uses standard AXI bus. As shown in the figure the control path uses AXI-Lite +and data path uses AXI4 full. Specialized hardware element called +`AXI Protocol Firewall `_ +monitors all transactions +going across the bus into the un-trusted User partition. It is possible that one or more AXI slave in the DFX +partition is not fully AXI-compliant or deadlocks/stalls/hangs during operation. When an AXI slave in DFX +partition fails, AXI Firewall *trips* -- it starts completing AXI transactions on behalf of the slave so the +master and the specific AXI bus is not impacted -- to protect the Shell. The AXI Firewall starts completing +all transactions on behalf of misbehaving slave while also notifying the mgmt driver about the trip. The +xclmgmt driver then starts taking recovery action. xclmgmt posts a XCL_MAILBOX_REQ_FIREWALL message to xocl using +MailBox to inform the peer about FireWall trip. xocl can suggest a reset by sending a XCL_MAILBOX_REQ_HOT_RESET message +to xclmgmt via mailBox. Note that even if no reset is performed the AXI Protocol Firewall will continue to protect the host PCIe bus. +DFX partition will be unavailable till device is reset. 
**A reboot of host is not required to reset the device.** + +Alveo boards with multiple FPGA devices on the same board like U30 support card level reset. Mailbox usage by each device on the card +is similar to that of single device cards, however firewall trip in one device will trigger reset to all devices on the card. + +AXI Firewall in Slave Interface (SI) mode also protects the host from errant transactions initiated by kernels over +Slave Bridge. For example if an AXI master kernel in the Dynamic Region issues a non compliant AXI transaction like +starting a burst transfer but stalling afterwards, the AXI Firewall (SI) will complete the transaction on behalf of the +failing kernel. This protects PCIe from un-correctable errors. + +PCIe Bus Safety +=============== + +As explained in the Firewall section above PCIe bus is protected by AXI Firewalls on both control and data path. +DFX Isolation only exposes AXI bus (AXI-Lite for control and AXI-Full for data paths) to the Dynamic Region. Kernels +compiled by user which sit in Dynamic Region do **not have direct access to PCIe bus** and hence cannot generate TLP +packets. This removes the risk of an errant User partition compromising the PCIe bus and taking over the host system. PCIe Demux +IP ensures that all PCIe transactions mastered by device over P2P, XDMA/QDMA and SB data paths are only possible over +PF1. This is critical for `Pass-through Virtualization`_ where host should not see any transactions initiated by PF1. + +Deployment Models +================= + +In all deployment models PCIe host with access to PF0 is considered part of *Root-of-Trust*. + +Baremetal +--------- + +In Baremetal deployment model, both physical functions are visible to the end user who *does not* +have root privileges. End user have access to both XRT **xclmgmt** and XRT **xocl** drivers. The system +administrator trusts both drivers which provide well defined :doc:`mgmt-ioctl.main` and :doc:`xocl_ioctl.main`. +End user does have the privilege to load xclbins which should be signed for maximum security. This +will ensure that only known good xclbins are loaded by end users. + +Certain operations like resetting the board and upgrading the flash image on PROM (from which the shell +is loaded on system boot) require root privileges and are effected by xclmgmt driver. + +Pass-through Virtualization +--------------------------- + +In Pass-through Virtualization deployment model, management physical function (PF0) is only visible to the host +but user physical function (PF1) is visible to the guest VM. Host considers the guest VM a *hostile* environment. +End users in guest VM may be root and may be running modified implementation of XRT **xocl** driver -- XRT +**xclmgmt** driver does not trust XRT xocl driver. xclmgmt as described before exposes well defined +:doc:`mgmt-ioctl.main` to the host. In a good and clean deployment end users in guest VM interact with +standard xocl using well defined :doc:`xocl_ioctl.main`. + +As explained under the Shell section above, by design xocl has limited access to violet shaded Shell peripherals. +This ensures that users in guest VM cannot perform any privileged operation like updating flash image or device +reset. A user in guest VM can only perform operations listed under USER PF (PF1) section in :doc:`platforms`. + +A guest VM user can potentially crash a compute unit in User partition, deadlock data path AXI bus or corrupt +device memory. If the user has root access he may compromise VM memory. 
But none of this can bring down the +host or the PCIe bus. Host memory is protected by system IOMMU. Device reset and recovery is described below. + +A user cannot load a malicious xclbin on the User partition since xclbin downloads are done by xclmgmt +drive. xclbins are passed on to the host via a plugin based MPD/MSD framework defined in +:doc:`mailbox.main`. Host can add any extra checks necessary to validate xclbins received from guest VM. + +This deployment model is ideal for public cloud where host does not trust the guest VM. This is the prevalent +deployment model for FaaS operators. + +Summary +------- + ++------------------------------+---------------------------+ +| Behavior | Deployment Model | +| +------------+--------------+ +| | Bare Metal | Pass-through | ++=================+============+============+==============+ +| System admin | xocl | Yes | No | +| trusts drivers +------------+------------+--------------+ +| | xclmgmt | Yes | Yes | ++-----------------+------------+------------+--------------+ +| End user has | xocl | No | Maybe | +| root access +------------+------------+--------------+ +| | xclmgmt | No | No | ++-----------------+------------+------------+--------------+ +| End user can crash device | Yes | Yes | ++------------------------------+------------+--------------+ +| End user can crash PCIe bus | No | No | ++------------------------------+------------+--------------+ +| End user with root access | Yes | No | +| can crash PCIe bus | | | ++------------------------------+------------+--------------+ + + +Mailbox +======= + +Mailbox is used for communication between user physical function driver, xocl and management physical +function driver, xclmgmt. The Mailbox hardware design and xclmgmt driver mailbox handling implementation +has the ability to throttle requests coming from xocl driver. + +xclmgmt driver has twofold security protections on the h/w mailbox. From packet layer, xclmgmt monitors +the receiving packet rates and can enforce a threshold. If the receiving packet rates exceeds the threshold, +the mailbox is disabled which prevents the guest from sending any more commands over mailbox. Only +a hot reset on the FPGA device from xclmgmt can recover it. From message layer,system administrator can configure +the xclmgmt driver to ignore specific mailbox opcodes. + +Here is an example how System administrator managing the privileged management physical function driver xclmgmt +can configure the mailbox to ignore specific opcodes using xbmgmt utility. + +.. code-block:: bash + + # In host + Host>$ sudo xbmgmt dump --config --output /tmp/config.ini -d bdf + + # Edit the dumped ini file and change the value to key 'mailbox_channel_disable' + # eg. 
if both xclbin download and reset are to be disabled, one can set + # mailbox_channel_disable=0x120 + # where 0x120 is 1 << XCL_MAILBOX_REQ_LOAD_XCLBIN | + # 1 << XCL_MAILBOX_REQ_HOT_RESET + # as defined below + # XCL_MAILBOX_REQ_UNKNOWN = 0, + # XCL_MAILBOX_REQ_TEST_READY = 1, + # XCL_MAILBOX_REQ_TEST_READ = 2, + # XCL_MAILBOX_REQ_LOCK_BITSTREAM = 3, + # XCL_MAILBOX_REQ_UNLOCK_BITSTREAM = 4, + # XCL_MAILBOX_REQ_HOT_RESET = 5, + # XCL_MAILBOX_REQ_FIREWALL = 6, + # XCL_MAILBOX_REQ_LOAD_XCLBIN_KADDR = 7, + # XCL_MAILBOX_REQ_LOAD_XCLBIN = 8, + # XCL_MAILBOX_REQ_RECLOCK = 9, + # XCL_MAILBOX_REQ_PEER_DATA = 10, + # XCL_MAILBOX_REQ_USER_PROBE = 11, + # XCL_MAILBOX_REQ_MGMT_STATE = 12, + # XCL_MAILBOX_REQ_CHG_SHELL = 13, + # XCL_MAILBOX_REQ_PROGRAM_SHELL = 14, + # XCL_MAILBOX_REQ_READ_P2P_BAR_ADDR = 15, + + Host>$ vi /tmp/config.ini + + # Load config + Host>$ xbmgmt advanced --load-conf --input=/tmp/config.ini -d bdf + + +:doc:`mailbox.main` has details on mailbox usage. + +Device Reset and Recovery +========================= + +Device reset and recovery is a privileged operation and can only be performed by the xclmgmt driver. The xocl +driver can request a device reset by sending a message to the xclmgmt driver over the Mailbox. An end user +can reset a device by using the XRT **xbutil** utility. This utility talks to the xocl driver, which uses the reset +message as defined in :doc:`mailbox.main`. + +Currently Alveo boards are reset by using the PCIe bus *hot reset* mechanism. This resets the board peripherals +and also the PCIe link. As part of the reset, the drivers kill all the clients which have opened the device node by +sending them a SIGBUS. + +On some Alveo boards like U30, multiple FPGA devices are supported with the help of PCIe bifurcation. The +reset in this case is a card level reset, which means a reset issued from one FPGA device will result in all +FPGAs on the same board being reset. Both the xocl and xclmgmt drivers can identify the other FPGA devices on the same board +and handle the reset accordingly. + +Shell Update +============ + +A shell update is like a firmware update in conventional PCIe devices. Shell updates are distributed as signed +RPM/DEB package files by Xilinx®. Shells may be upgraded using the XRT **xbmgmt** utility by system administrators +only. The upgrade process will update the PROM. A cold reboot of the host is required in order to boot the +platform from the updated image. + +Compute Kernel Execution Models +=============================== + +XRT and Alveo support software defined compute kernel execution models having standard AXI hardware +interfaces. More details are in :doc:`xrt_kernel_executions`. These well understood models do not require +direct register access from user space. To execute a compute kernel XRT has a well defined *exec command buffer* +API and a *wait for exec completion* API. These operations are exposed as ioctls by the xocl driver. diff --git a/2024.2/html/_sources/sysfs.rst.txt b/2024.2/html/_sources/sysfs.rst.txt new file mode 100644 index 00000000000..de94675de79 --- /dev/null +++ b/2024.2/html/_sources/sysfs.rst.txt @@ -0,0 +1,888 @@ +.. _sysfs.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + + +Linux Sys FileSystem Nodes +************************** + +``xocl`` and ``xclmgmt`` drivers expose several ``sysfs`` nodes under +the ``pci`` device root node. The sysfs nodes are populated by +platform drivers present in the respective drivers.
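+ +As a quick illustration, once the device BDF is known the nodes can be read with standard tools. The paths below are examples only (the exact BDF and the available nodes depend on the platform and XRT version); ``xclbinuuid`` and ``p2p_enable`` are among the nodes shown in the ``xocl`` tree below. + +:: + + # cat /sys/bus/pci/devices/0000:04:00.1/xclbinuuid + # cat /sys/bus/pci/devices/0000:04:00.1/p2p_enable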
+ +xocl +==== + +The ``xocl`` driver exposes various sections of the ``xclbin`` image +including the ``xclbinuuid`` on ``sysfs``. This makes it very +convenient for tools (such as ``xbutil``) to discover characteristics +of the image currently loaded on the FPGA. The data layout of ``xclbin`` +sections are defined in file ``xclbin.h`` which can be found under +``runtime/core/include`` directory. Platform drivers XDMA, ICAP, +MB Scheduler, Mailbox, XMC, XVC, FeatureROM export their nodes on sysfs. + +Sample output of tree command below:: + + dx4300:/<1>devices/pci0000:00/0000:00:15.0>tree -n 0000:04:00.1 + 0000:04:00.1 + ├── broken_parity_status + ├── class + ├── config + ├── config_mailbox_channel_switch + ├── config_mailbox_comm_id + ├── consistent_dma_mask_bits + ├── current_link_speed + ├── current_link_width + ├── d3cold_allowed + ├── device + ├── dev_offline + ├── dma_mask_bits + ├── dma.xdma.u.1025 + │ ├── channel_stat_raw + │ ├── driver -> ../../../../../bus/platform/drivers/xocl_xdma + │ ├── driver_override + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── driver -> ../../../../bus/pci/drivers/xocl + ├── driver_override + ├── drm + │ ├── card1 + │ │ ├── dev + │ │ ├── device -> ../../../0000:04:00.1 + │ │ ├── power + │ │ │ ├── async + │ │ │ ├── autosuspend_delay_ms + │ │ │ ├── control + │ │ │ ├── runtime_active_kids + │ │ │ ├── runtime_active_time + │ │ │ ├── runtime_enabled + │ │ │ ├── runtime_status + │ │ │ ├── runtime_suspended_time + │ │ │ └── runtime_usage + │ │ ├── subsystem -> ../../../../../../class/drm + │ │ └── uevent + │ └── renderD129 + │ ├── dev + │ ├── device -> ../../../0000:04:00.1 + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../../class/drm + │ └── uevent + ├── enable + ├── hwmon + │ └── hwmon5 + │ ├── curr1_average + │ ├── curr1_highest + │ ├── curr1_input + │ ├── curr2_average + │ ├── curr2_highest + │ ├── curr2_input + │ ├── curr3_average + │ ├── curr3_highest + │ ├── curr3_input + │ ├── curr4_average + │ ├── curr4_highest + │ ├── curr4_input + │ ├── curr5_average + │ ├── curr5_highest + │ ├── curr5_input + │ ├── curr6_average + │ ├── curr6_highest + │ ├── curr6_input + │ ├── device -> ../../../0000:04:00.1 + │ ├── name + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../../class/hwmon + │ └── uevent + ├── icap.u.1025 + │ ├── cache_expire_secs + │ ├── clock_freqs + │ ├── clock_freq_topology + │ ├── connectivity + │ ├── debug_ip_layout + │ ├── driver -> ../../../../../bus/platform/drivers/icap.u + │ ├── driver_override + │ ├── idcode + │ ├── ip_layout + │ ├── mem_topology + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── 
runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── iommu -> ../../0000:00:00.2/iommu/ivhd0 + ├── iommu_group -> ../../../../kernel/iommu_groups/11 + ├── irq + ├── kdsstat + ├── link_speed + ├── link_speed_max + ├── link_width + ├── link_width_max + ├── local_cpulist + ├── local_cpus + ├── mailbox_connect_state + ├── mailbox.u.1025 + │ ├── connection + │ ├── driver -> ../../../../../bus/platform/drivers/mailbox.u + │ ├── driver_override + │ ├── mailbox + │ ├── mailbox_ctl + │ ├── mailbox_pkt + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ ├── uevent + │ └── xrt_user + │ └── mailbox.u1025 + │ ├── dev + │ ├── device -> ../../../mailbox.u.1025 + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../../../class/xrt_user + │ └── uevent + ├── max_link_speed + ├── max_link_width + ├── mb_scheduler.u.1025 + │ ├── driver -> ../../../../../bus/platform/drivers/xocl_mb_sche + │ ├── driver_override + │ ├── kds_cucounts + │ ├── kds_custat + │ ├── kds_numcdmas + │ ├── kds_numcus + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── memstat + ├── memstat_raw + ├── mig_calibration + ├── modalias + ├── msi_bus + ├── msi_irqs + │ ├── 75 + │ ├── 76 + │ ├── 77 + │ ├── 78 + │ ├── 79 + │ ├── 80 + │ ├── 81 + │ ├── 82 + │ ├── 83 + │ ├── 84 + │ ├── 85 + │ ├── 86 + │ ├── 87 + │ ├── 88 + │ ├── 89 + │ ├── 90 + │ ├── 91 + │ ├── 92 + │ ├── 93 + │ └── 94 + ├── numa_node + ├── p2p_enable + ├── power + │ ├── async + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_kids + │ ├── runtime_active_time + │ ├── runtime_enabled + │ ├── runtime_status + │ ├── runtime_suspended_time + │ └── runtime_usage + ├── ready + ├── remove + ├── rescan + ├── resource + ├── resource0 + ├── resource0_wc + ├── resource2 + ├── resource2_wc + ├── resource4 + ├── resource4_wc + ├── revision + ├── rom.u.1025 + │ ├── ddr_bank_count_max + │ ├── ddr_bank_size + │ ├── dr_base_addr + │ ├── driver -> ../../../../../bus/platform/drivers/rom.u + │ ├── driver_override + │ ├── FPGA + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ ├── timestamp + │ ├── uevent + │ └── VBNV + ├── root_dev -> ../../0000:00:15.0 + ├── subsystem -> ../../../../bus/pci + ├── subsystem_device + ├── subsystem_vendor + ├── uevent + ├── userbar + ├── user_pf + ├── vendor + ├── xclbinuuid + ├── xmc.u.1025 + │ ├── cache_expire_secs + │ ├── capability + │ ├── driver -> ../../../../../bus/platform/drivers/xmc.u + │ ├── driver_override + │ ├── error + │ ├── host_msg_error + │ ├── host_msg_header + │ ├── host_msg_offset 
+ │ ├── id + │ ├── modalias + │ ├── pause + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── power_checksum + │ ├── power_flag + │ ├── reset + │ ├── sensor + │ ├── status + │ ├── subsystem -> ../../../../../bus/platform + │ ├── temp_by_mem_topology + │ ├── uevent + │ ├── version + │ ├── xmc_0v85 + │ ├── xmc_12v_aux_curr + │ ├── xmc_12v_aux_vol + │ ├── xmc_12v_pex_curr + │ ├── xmc_12v_pex_vol + │ ├── xmc_12v_sw + │ ├── xmc_1v2_top + │ ├── xmc_1v8 + │ ├── xmc_3v3_aux_vol + │ ├── xmc_3v3_pex_vol + │ ├── xmc_cage_temp0 + │ ├── xmc_cage_temp1 + │ ├── xmc_cage_temp2 + │ ├── xmc_cage_temp3 + │ ├── xmc_ddr_vpp_btm + │ ├── xmc_ddr_vpp_top + │ ├── xmc_dimm_temp0 + │ ├── xmc_dimm_temp1 + │ ├── xmc_dimm_temp2 + │ ├── xmc_dimm_temp3 + │ ├── xmc_fan_rpm + │ ├── xmc_fan_temp + │ ├── xmc_fpga_temp + │ ├── xmc_mgt0v9avcc + │ ├── xmc_mgtavtt + │ ├── xmc_se98_temp0 + │ ├── xmc_se98_temp1 + │ ├── xmc_se98_temp2 + │ ├── xmc_sys_5v5 + │ ├── xmc_vcc1v2_btm + │ ├── xmc_vccint_curr + │ └── xmc_vccint_vol + └── xvc_pub.u.1025 + ├── driver -> ../../../../../bus/platform/drivers/xvc.u + ├── driver_override + ├── modalias + ├── power + │ ├── async + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_kids + │ ├── runtime_active_time + │ ├── runtime_enabled + │ ├── runtime_status + │ ├── runtime_suspended_time + │ └── runtime_usage + ├── subsystem -> ../../../../../bus/platform + ├── uevent + └── xrt_user + └── xvc_pub.u1025 + ├── dev + ├── device -> ../../../xvc_pub.u.1025 + ├── power + │ ├── async + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_kids + │ ├── runtime_active_time + │ ├── runtime_enabled + │ ├── runtime_status + │ ├── runtime_suspended_time + │ └── runtime_usage + ├── subsystem -> ../../../../../../../class/xrt_user + └── uevent + + 59 directories, 306 files + + +xclmgmt +======= + +The ``xclmgmt`` driver exposes various sections of the ``xclbin`` image +including the ``xclbinuuid`` on ``sysfs``. This makes it very +convenient for tools (such as ``xbutil``) to discover characteristics +of the image currently loaded on the FPGA. The data layout of ``xclbin`` +sections are defined in file ``xclbin.h`` which can be found under +``runtime/core/include`` directory. Platform drivers ICAP, FPGA Manager, +AXI Firewall, Mailbox, XMC, XVC, FeatureROM export their nodes on sysfs. 
+ +Sample output of tree command below:: + + dx4300:/<1>devices/pci0000:00/0000:00:15.0>tree 0000:04:00.0 + 0000:04:00.0 + ├── board_name + ├── broken_parity_status + ├── class + ├── config + ├── config_mailbox_channel_switch + ├── config_mailbox_comm_id + ├── consistent_dma_mask_bits + ├── current_link_speed + ├── current_link_width + ├── d3cold_allowed + ├── device + ├── dev_offline + ├── dma_mask_bits + ├── driver -> ../../../../bus/pci/drivers/xclmgmt + ├── driver_override + ├── enable + ├── error + ├── feature_rom_offset + ├── firewall.m.1024 + │ ├── clear + │ ├── detected_level + │ ├── detected_status + │ ├── detected_time + │ ├── driver -> ../../../../../bus/platform/drivers/xocl_firewall + │ ├── driver_override + │ ├── inject + │ ├── level + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── status + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── flash_type + ├── fmgr.m.1024 + │ ├── driver -> ../../../../../bus/platform/drivers/xocl_fmgr + │ ├── driver_override + │ ├── fpga_manager + │ │ └── fpga0 + │ │ ├── device -> ../../../fmgr.m.1024 + │ │ ├── name + │ │ ├── power + │ │ │ ├── async + │ │ │ ├── autosuspend_delay_ms + │ │ │ ├── control + │ │ │ ├── runtime_active_kids + │ │ │ ├── runtime_active_time + │ │ │ ├── runtime_enabled + │ │ │ ├── runtime_status + │ │ │ ├── runtime_suspended_time + │ │ │ └── runtime_usage + │ │ ├── state + │ │ ├── subsystem -> ../../../../../../../class/fpga_manager + │ │ └── uevent + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── hwmon + │ ├── hwmon3 + │ │ ├── device -> ../../../0000:04:00.0 + │ │ ├── in0_highest + │ │ ├── in0_input + │ │ ├── in0_lowest + │ │ ├── in1_highest + │ │ ├── in1_input + │ │ ├── in1_lowest + │ │ ├── in2_highest + │ │ ├── in2_input + │ │ ├── in2_lowest + │ │ ├── name + │ │ ├── power + │ │ │ ├── async + │ │ │ ├── autosuspend_delay_ms + │ │ │ ├── control + │ │ │ ├── runtime_active_kids + │ │ │ ├── runtime_active_time + │ │ │ ├── runtime_enabled + │ │ │ ├── runtime_status + │ │ │ ├── runtime_suspended_time + │ │ │ └── runtime_usage + │ │ ├── subsystem -> ../../../../../../class/hwmon + │ │ ├── temp1_highest + │ │ ├── temp1_input + │ │ ├── temp1_lowest + │ │ └── uevent + │ └── hwmon4 + │ ├── curr1_average + │ ├── curr1_highest + │ ├── curr1_input + │ ├── curr2_average + │ ├── curr2_highest + │ ├── curr2_input + │ ├── curr3_average + │ ├── curr3_highest + │ ├── curr3_input + │ ├── curr4_average + │ ├── curr4_highest + │ ├── curr4_input + │ ├── curr5_average + │ ├── curr5_highest + │ ├── curr5_input + │ ├── curr6_average + │ ├── curr6_highest + │ ├── curr6_input + │ ├── device -> ../../../0000:04:00.0 + │ ├── name + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../../class/hwmon + │ └── uevent + ├── icap.m.1024 + │ ├── cache_expire_secs + │ ├── clock_freqs + │ ├── clock_freq_topology + │ ├── 
connectivity + │ ├── debug_ip_layout + │ ├── driver -> ../../../../../bus/platform/drivers/icap.m + │ ├── driver_override + │ ├── idcode + │ ├── ip_layout + │ ├── mem_topology + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── shell_program + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── instance + ├── iommu -> ../../0000:00:00.2/iommu/ivhd0 + ├── iommu_group -> ../../../../kernel/iommu_groups/11 + ├── irq + ├── link_speed + ├── link_speed_max + ├── link_width + ├── link_width_max + ├── local_cpulist + ├── local_cpus + ├── mailbox.m.1024 + │ ├── connection + │ ├── driver -> ../../../../../bus/platform/drivers/mailbox.m + │ ├── driver_override + │ ├── mailbox + │ ├── mailbox_ctl + │ ├── mailbox_pkt + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ ├── uevent + │ └── xrt_mgmt + │ └── mailbox.m1024 + │ ├── dev + │ ├── device -> ../../../mailbox.m.1024 + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../../../class/xrt_mgmt + │ └── uevent + ├── max_link_speed + ├── max_link_width + ├── mfg + ├── mgmt_pf + ├── mig_calibration + ├── modalias + ├── msi_bus + ├── msi_irqs + │ ├── 52 + │ ├── 53 + │ ├── 54 + │ ├── 55 + │ ├── 56 + │ ├── 57 + │ ├── 58 + │ ├── 59 + │ ├── 60 + │ ├── 61 + │ ├── 62 + │ ├── 63 + │ ├── 64 + │ ├── 65 + │ ├── 66 + │ ├── 67 + │ ├── 68 + │ ├── 69 + │ ├── 70 + │ └── 71 + ├── nifd_pri.m.1024 + │ ├── driver -> ../../../../../bus/platform/drivers/nifd.m + │ ├── driver_override + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ └── uevent + ├── numa_node + ├── power + │ ├── async + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_kids + │ ├── runtime_active_time + │ ├── runtime_enabled + │ ├── runtime_status + │ ├── runtime_suspended_time + │ └── runtime_usage + ├── ready + ├── remove + ├── rescan + ├── resource + ├── resource0 + ├── resource0_wc + ├── resource2 + ├── resource2_wc + ├── revision + ├── rom.m.1024 + │ ├── ddr_bank_count_max + │ ├── ddr_bank_size + │ ├── dr_base_addr + │ ├── driver -> ../../../../../bus/platform/drivers/rom.m + │ ├── driver_override + │ ├── FPGA + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ ├── timestamp + │ ├── uevent + │ └── VBNV + ├── slot + ├── subdev_offline + ├── subdev_online + ├── subsystem -> ../../../../bus/pci + ├── subsystem_device + ├── subsystem_vendor + ├── 
sysmon.m.1024 + │ ├── driver -> ../../../../../bus/platform/drivers/xocl_sysmon + │ ├── driver_override + │ ├── modalias + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../bus/platform + │ ├── temp + │ ├── uevent + │ ├── vcc_aux + │ ├── vcc_bram + │ └── vcc_int + ├── uevent + ├── userbar + ├── vendor + ├── version + ├── xmc.m.1024 + │ ├── cache_expire_secs + │ ├── capability + │ ├── driver -> ../../../../../bus/platform/drivers/xmc.m + │ ├── driver_override + │ ├── error + │ ├── host_msg_error + │ ├── host_msg_header + │ ├── host_msg_offset + │ ├── id + │ ├── modalias + │ ├── pause + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── power_checksum + │ ├── power_flag + │ ├── reset + │ ├── sensor + │ ├── status + │ ├── subsystem -> ../../../../../bus/platform + │ ├── temp_by_mem_topology + │ ├── uevent + │ ├── version + │ ├── xmc_0v85 + │ ├── xmc_12v_aux_curr + │ ├── xmc_12v_aux_vol + │ ├── xmc_12v_pex_curr + │ ├── xmc_12v_pex_vol + │ ├── xmc_12v_sw + │ ├── xmc_1v2_top + │ ├── xmc_1v8 + │ ├── xmc_3v3_aux_vol + │ ├── xmc_3v3_pex_vol + │ ├── xmc_cage_temp0 + │ ├── xmc_cage_temp1 + │ ├── xmc_cage_temp2 + │ ├── xmc_cage_temp3 + │ ├── xmc_ddr_vpp_btm + │ ├── xmc_ddr_vpp_top + │ ├── xmc_dimm_temp0 + │ ├── xmc_dimm_temp1 + │ ├── xmc_dimm_temp2 + │ ├── xmc_dimm_temp3 + │ ├── xmc_fan_rpm + │ ├── xmc_fan_temp + │ ├── xmc_fpga_temp + │ ├── xmc_mgt0v9avcc + │ ├── xmc_mgtavtt + │ ├── xmc_se98_temp0 + │ ├── xmc_se98_temp1 + │ ├── xmc_se98_temp2 + │ ├── xmc_sys_5v5 + │ ├── xmc_vcc1v2_btm + │ ├── xmc_vccint_curr + │ └── xmc_vccint_vol + ├── xpr + ├── xrt_mgmt + │ └── xclmgmt1024 + │ ├── dev + │ ├── device -> ../../../0000:04:00.0 + │ ├── power + │ │ ├── async + │ │ ├── autosuspend_delay_ms + │ │ ├── control + │ │ ├── runtime_active_kids + │ │ ├── runtime_active_time + │ │ ├── runtime_enabled + │ │ ├── runtime_status + │ │ ├── runtime_suspended_time + │ │ └── runtime_usage + │ ├── subsystem -> ../../../../../../class/xrt_mgmt + │ └── uevent + └── xvc_pri.m.1024 + ├── driver -> ../../../../../bus/platform/drivers/xvc.m + ├── driver_override + ├── modalias + ├── power + │ ├── async + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_kids + │ ├── runtime_active_time + │ ├── runtime_enabled + │ ├── runtime_status + │ ├── runtime_suspended_time + │ └── runtime_usage + ├── subsystem -> ../../../../../bus/platform + ├── uevent + └── xrt_mgmt + └── xvc_pri.m66560 + ├── dev + ├── device -> ../../../xvc_pri.m.1024 + ├── power + │ ├── async + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_kids + │ ├── runtime_active_time + │ ├── runtime_enabled + │ ├── runtime_status + │ ├── runtime_suspended_time + │ └── runtime_usage + ├── subsystem -> ../../../../../../../class/xrt_mgmt + └── uevent + + 71 directories, 364 files + + +zocl +==== + +Similar to PCIe drivers ``zocl`` driver used in embedded platforms +exposes various sections of the ``xclbin`` image +including the ``xclbinuuid`` on ``sysfs``. This makes it very +convenient for tools (such as ``xbutil``) to discover characteristics +of the image currently loaded on the FPGA. 
The data layout of the ``xclbin`` +sections is defined in the file ``xclbin.h``, which can be found under the +``runtime/core/include`` directory. + +Sample output of the ``tree`` command is shown below:: + + mpsoc:/sys/bus/platform/devices/amba>tree zyxclmm_drm + zyxclmm_drm + ├── connectivity + ├── debug_ip_layout + ├── ip_layout + ├── kds_custat + ├── memstat + ├── memstat_raw + ├── mem_topology + ├── modalias + ├── of_node + │ ├── compatible + │ ├── name + │ ├── reg + │ └── status + ├── power + │ ├── autosuspend_delay_ms + │ ├── control + │ ├── runtime_active_time + │ ├── runtime_status + │ └── runtime_suspended_time + ├── uevent + └── xclbinid diff --git a/2024.2/html/_sources/system_requirements.rst.txt b/2024.2/html/_sources/system_requirements.rst.txt new file mode 100644 index 00000000000..419cff1225e --- /dev/null +++ b/2024.2/html/_sources/system_requirements.rst.txt @@ -0,0 +1,47 @@ +.. _system_requirements.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + + +System Requirements +------------------- + +Host Platform for PCIe Accelerator Cards +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. x86_64 +2. AARCH64 +3. PPC64LE + +Supported Xilinx® Accelerator Cards are listed in :doc:`platforms`. + + +XRT Software Stack for PCIe Accelerator Cards +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The XRT software stack requires Linux kernel 3.10+. + +The XRT software stack is tested on RHEL/CentOS and Ubuntu. +For the detailed list of supported operating systems, please refer to the specific release version of `UG1451 XRT Release Notes `_. + +XRT is needed in both the application development and deployment environments. + +To install XRT on the host, please refer to :doc:`install` for dependency installation steps and XRT installation steps. + +To build a custom XRT package, please refer to :doc:`build` for dependency installation steps and build steps. + +XRT Software Stack for Embedded Platforms +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The XRT software stack requires Linux kernel 3.10+. XRT for embedded platforms is tested with PetaLinux. + +XRT needs to be installed on the development environment (rootfs or sysroot) and deployment environment (rootfs) of embedded platforms. + +If native compilation on the embedded processor is to be used, XRT, xrt-dev, and GCC need to be installed on the target embedded system rootfs. + +If the application is developed on a server using cross compilation, XRT needs to be installed into the sysroot; the application can then be cross compiled against the sysroot. +XRT for the server is not required on the cross compile server. + +The embedded platform for deployment should have XRT and ZOCL installed. For details about building embedded platforms, please refer to :doc:`yocto`. diff --git a/2024.2/html/_sources/test.rst.txt b/2024.2/html/_sources/test.rst.txt new file mode 100644 index 00000000000..1fb9361dde7 --- /dev/null +++ b/2024.2/html/_sources/test.rst.txt @@ -0,0 +1,133 @@ +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2022 Xilinx, Inc. All rights reserved. + +Developer Build and Test Instructions +------------------------------------- + +Switching XRT development work from P4 to Git can be done without much +downtime provided you use a few scripts we have created: + +- ``build.sh``: a build script that builds XRT for both the Debug and Release profiles. +- ``run.sh``: a loader script that sets up the environment, assuming XRT was + built with ``build.sh``.
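+
+A rough, hypothetical sketch of the kind of environment setup such a loader script
+performs is shown below; the exact variables and paths used by ``run.sh`` may differ::
+
+   # Illustrative sketch only -- point the environment at a local XRT build
+   XRT_BUILD_DIR=/path/to/XRT/build/Release        # or .../Debug
+   export XILINX_XRT=$XRT_BUILD_DIR/opt/xilinx/xrt # assumed staging layout
+   export PATH=$XILINX_XRT/bin:$PATH
+   export LD_LIBRARY_PATH=$XILINX_XRT/lib:$LD_LIBRARY_PATH
+   exec "$@"                                       # run the wrapped command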
 + +Building XRT +~~~~~~~~~~~~ + +Building XRT is tested on Ubuntu 20.04/22.04, CentOS 7.9+, and RHEL 9 host operating systems. + +It is probably safest if you keep your Git clone of XRT on a network +mounted drive that can be accessed from different hosts. One +advantage is that you can have your editor run on a host that is not +used for board testing, since you don't really want host/driver +crashes to leave your unsaved edits in limbo. + +:: + + git clone https://github.com/Xilinx/XRT.git + cd XRT/build + ./build.sh + +The ``build.sh`` script builds for both the Debug and Release profiles. It is +necessary to use the build script if you intend to use the loader +script ``run.sh`` and the board testing script ``board.sh``. + +For the normal development flow, it is not necessary to build RPM or +DEB packages. The loader and test scripts both work by +setting the environment to point at the binaries created by the build +script. + +Running XRT +~~~~~~~~~~~ + +To run your locally built XRT with a sample ``host.exe`` and +``kernel.xclbin``, simply prepend your command line invocation with +``XRT/build/run.sh``. + +:: + + /XRT/build/run.sh ./host.exe kernel.xclbin + +By default, the ``run.sh`` script uses the binaries from the Release +profile. To run with the binaries from the Debug profile, use the ``-dbg`` +flag; this way you can even start your favorite debugger by prefixing its +invocation with ``run.sh -dbg``. + +:: + + /XRT/build/run.sh -dbg emacs + + +Testing XRT +~~~~~~~~~~~ + +After making changes to XRT in your Git clone, rebuild with +``build.sh`` as explained above, then run a full set of pre-harvested +board tests using the ``board.sh`` script. + +The script is not supported, and the harvesting part of the script no longer +works. If tests are harvested manually, a bit of reverse engineering +is needed to figure out how to organize these tests so that they can be +run with ``board.sh``. + +Assuming all is well, the board script will +run all tests organized under the current directory. + +While tests run, a file named ``results.all`` will list each test with a +``PASS``\ /\ ``FAIL`` keyword. This file is appended to (not removed +between runs). A complete run should take 5-10 minutes for approximately +70 tests. + + +Unit Testing XRT +~~~~~~~~~~~~~~~~ + +We use GTest to do unit testing. The GTest package is installed by +running ``XRT/src/runtime_src/tools/scripts/xrtdeps.sh``. + +The GTest package on CentOS/RHEL 7.5 provides the GTest libraries +here: + + * ``/usr/lib64/libgtest.so`` + * ``/usr/lib64/libgtest_main.so`` + +In recent versions of Ubuntu, the GTest ``libgtest-dev`` package +provides the compiled libraries in + + * ``/usr/lib/x86_64-linux-gnu/libgtest.a`` + * ``/usr/lib/x86_64-linux-gnu/libgtest_main.a`` + +However, the GTest package on Ubuntu up to 18.04 provides source only! +So, to use GTest on older Ubuntu versions, use:: + + cd /usr/src/gtest + sudo cmake CMakeLists.txt + sudo make + cd /usr/lib + sudo ln -s /usr/src/gtest/libgtest.a + sudo ln -s /usr/src/gtest/libgtest_main.a + # Validate: + ls *gtest* + +This will add GTest static library symbolic links here: + + * ``/usr/lib/libgtest.a`` + * ``/usr/lib/libgtest_main.a`` + +CMake will handle finding and linking GTest for you.
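+
+Once the unit test executable is built (see the ``CMakeLists.txt`` example that
+follows), it can be run directly or through CTest. The commands below are an
+illustrative sketch only; they assume the executable is named ``xclbintest``
+and that you are in the CMake build directory::
+
+   # List the registered tests, then run a subset of them (the filter name is hypothetical)
+   ./xclbintest --gtest_list_tests
+   ./xclbintest --gtest_filter='Xclbin*'
+
+   # Alternatively run through CTest, provided the tests are also registered with add_test()
+   ctest --output-on-failure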
 + +To add GTest support to a ``CMakeLists.txt``, use the following snippet, which builds +an example executable called ``xclbintest``:: + + find_package(GTest) + if (GTEST_FOUND) + enable_testing() + message (STATUS "GTest include dirs: '${GTEST_INCLUDE_DIRS}'") + include_directories(${GTEST_INCLUDE_DIRS}) + add_executable(xclbintest unittests/main.cpp unittests/test.cpp) + message (STATUS "GTest libraries: '${GTEST_BOTH_LIBRARIES}'") + target_link_libraries(xclbintest ${GTEST_BOTH_LIBRARIES} pthread) + else() + message (STATUS "GTest was not found, skipping generation of test executables") + endif() diff --git a/2024.2/html/_sources/vsec.rst.txt b/2024.2/html/_sources/vsec.rst.txt new file mode 100644 index 00000000000..96dc36bc91f --- /dev/null +++ b/2024.2/html/_sources/vsec.rst.txt @@ -0,0 +1,73 @@ +.. _vsec.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + + +Accessing vsec within VM +~~~~~~~~~~~~~~~~~~~~~~~~ + +Background +########## + +vsec (Vendor Specific Extended Capability) is introduced to support the raptor based shell design. It resides in the extended config space of the PCIe node. + +In a cloud environment where the PCIe device is passed through to the VM, if the emulated chipset doesn't support PCIe, the extended config space can't be accessed, and cards with a raptor based shell will not work. + +By default, KVM will choose the 'i440' chipset, which doesn't have PCIe support. This section shows how, on Linux, to choose a newer chipset type, 'q35', which has PCIe support. + +Creating VM with virt-manager +############################# + + - Run ``qemu-system-x86_64 -machine help`` and verify that q35 is supported; otherwise the qemu version is too old and should be upgraded to a newer version. + - Start virt-manager; during VM creation, make sure to choose 'Customize configuration before install'. + + .. image:: q35-0_LI.jpg + :align: center + + - Before 'Begin installation', make sure the chipset is 'q35'. + + .. image:: q35-1_LI.jpg + :align: center + + + +Note: If you already created the VM with the 'i440' chipset, you can manually edit the XML config file of the VM to change the chipset to 'q35'; several parts of the file need to change. + +Pass through FPGA user PF to VM +############################### + +You can pass through the FPGA user PF to the VM during VM creation. In the 'Customize configuration' window, add the FPGA user PF. + +.. image:: pass-through.PNG + :align: center + + +You can also do it after the VM is created. + +PCI passthrough of user PF to KVM guest +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + +.. code-block:: shell + + Host># virsh edit name_of_vm + # add the following to the definition XML file of the VM + + + +
 + <hostdev mode='subsystem' type='pci' managed='yes'> + <source> + <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> + </source> + <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/> + </hostdev>
 + + +Here, ``domain='0x0000' bus='0x03' slot='0x00' function='0x1'`` is the DBDF of the user PF on the host, while ``domain='0x0000' bus='0x00' slot='0x09' function='0x0'`` is the DBDF assigned inside the VM. We set ``managed='yes'`` so that when the VM is started, the PF(s) are automatically assigned to the VM, and when the VM is destroyed, the PF(s) are reassigned back to the host. + +If you want to do hot-plug, do not set ``managed='yes'``. + +For more details, please refer to + +https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/virtualization/chap-virtualization-pci_passthrough + + diff --git a/2024.2/html/_sources/xball.rst.txt b/2024.2/html/_sources/xball.rst.txt new file mode 100644 index 00000000000..c5e511bd810 --- /dev/null +++ b/2024.2/html/_sources/xball.rst.txt @@ -0,0 +1,27 @@ +.. _xball.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2021 Xilinx, Inc. All rights reserved. + +xball +===== + +To facilitate running the same ``xbutil`` or ``xbmgmt`` command in a multi-card environment, XRT provides a utility script named ``xball``. The ``xball`` script can be used as below: + +.. code-block:: shell + + #xball + xball xbutil examine + +The ``xball`` script will detect all the cards attached to the server and execute the ``xbutil`` or ``xbmgmt`` command on all of them. + +Additionally, the ``xball`` script provides a filtering option to execute the command only on specific cards, for example: + +.. code-block:: shell + + #Run `xbutil examine` command only on U30 cards + xball --device-filter 'u30' xbutil examine + + #Run `xbutil examine` command only on U250 cards + xball --device-filter '^xilinx_u250' xbutil examine diff --git a/2024.2/html/_sources/xbflash2.rst.txt b/2024.2/html/_sources/xbflash2.rst.txt new file mode 100644 index 00000000000..27a4c720300 --- /dev/null +++ b/2024.2/html/_sources/xbflash2.rst.txt @@ -0,0 +1,229 @@ +.. _xbflash2.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2022 Xilinx, Inc. All rights reserved. + +xbflash2 +======== + +The Xilinx (R) Board Flash utility (xbflash2) is a standalone command line utility to flash a custom image onto a given device. This document describes the latest ``xbflash2`` commands. + +In the 2022.1 release, this utility is Early Access with limited validation. + +This tool is supported for all Alveo platforms. + +This tool doesn't require the XRT package and doesn't come with the XRT package; it comes as a separate xbflash package. + +The ``xbflash2`` tool is available on the Alveo card web page, in the getting started section, under the xbflash2 tab. + +For example: https://www.xilinx.com/products/boards-and-kits/alveo/u50.html#xbflash2 + +After xbflash package installation, the content goes to ``/usr/local/bin``. + +This tool is verified and supported only on XDMA PCIe DMA designs. + +**Global options**: These global options can be used with any command. + + - ``--help`` : Get help message to use this application. + - ``--verbose``: Turn on verbosity and show more output whenever applicable. + - ``--batch``: Enable batch mode. + - ``--force``: When possible, force an operation. + +Currently supported ``xbflash2`` commands are + + - ``xbflash2 program`` + - ``xbflash2 dump`` + + +xbflash2 program +~~~~~~~~~~~~~~~~ + +The ``xbflash2 program`` command programs the given acceleration image into the device's shell. + +**The supported options** + +Updates the image(s) for a given device. + +.. 
code-block:: shell + + xbflash2 program [--help|-h] --[ spi | qspips ] [commandArgs] + +**The details of the supported options** + +- The ``--help`` (or ``-h``) option gets the help message for this sub-command. +- The ``--spi`` option is used for the spi flash type. +- The ``--qspips`` option is used for the qspips flash type. + + +xbflash2 program --spi +~~~~~~~~~~~~~~~~~~~~~~ + +The ``xbflash2 program --spi`` command programs the given acceleration image into the device's shell for the spi flash type. + +**The supported use cases and their options** + +Program the image (.mcs) to the device's shell. + +.. code-block:: shell + + xbflash2 program --spi [--image|-i] <.mcs file path> [--device|-d] [--dual-flash|-u] [--bar|-b] [--bar-offset|-s] + +Revert to the golden image. This resets the FPGA PROM back to the factory image. + +.. code-block:: shell + + xbflash2 program --spi [--revert-to-golden|-r] [--device|-d] [--dual-flash|-u] [--bar|-b] [--bar-offset|-s] + + +**The details of the supported options** + +- The ``--device`` (or ``-d``) option specifies the target device to program. + + - : The Bus:Device.Function of the device of interest + +- The ``--dual-flash`` (or ``-u``) option specifies if the card is dual flash supported. + +- The ``--bar`` (or ``-b``) option specifies the BAR index, default is 0. + +- The ``--bar-offset`` (or ``-s``) option specifies the BAR offset, default is 0x40000. + +- The ``--image`` (or ``-i``) option specifies the MCS image path to update the persistent device. + +- The ``--revert-to-golden`` (or ``-r``) command reverts the flash image back to the golden version of the card. + + +**Example commands** + + +.. code-block:: shell + + #Program the mcs image. + xbflash2 program --spi --device 0000:3b:00.0 --image + + #Program the mcs image. + xbflash2 program --spi --device 0000:3b:00.0 --image --bar 0 --bar-offset 0x10000 + + #Program the image for dual-flash type. + xbflash2 program --spi --device 0000:5e:00.1 --image --image --bar 0 --bar-offset 0x40000 --dual-flash + + #Revert to golden image + xbflash2 program --spi --device 0000:d8:00.0 --revert-to-golden --bar 0 --bar-offset 0x40000 --dual-flash + + +xbflash2 program --qspips +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``xbflash2 program --qspips`` command programs the given acceleration image into the device's shell for the qspips flash type. + +**The supported use cases and their options** + +Program the image (boot.bin) to the device's shell. + +.. code-block:: shell + + xbflash2 program --qspips [--image|-i] [--device|-d] [--offset|-a] [--flash-part|-p] [--bar|-b] [--bar-offset|-s] + +Erase flash on the device. + +.. code-block:: shell + + xbflash2 program --qspips [--erase|-e] [--length|-l] [--device|-d] [--offset|-a] [--flash-part|-p] [--bar|-b] [--bar-offset|-s] + + +**The details of the supported options** + +- The ``--device`` (or ``-d``) option specifies the target device to program. + + - : The Bus:Device.Function of the device of interest + +- The ``--offset`` (or ``-a``) option specifies the offset on flash to start at, default is 0. + +- The ``--flash-part`` (or ``-p``) option specifies the qspips flash type, default is qspi_ps_x2_single. + +- The ``--bar`` (or ``-b``) option specifies the BAR index for qspips, default is 0. + +- The ``--bar-offset`` (or ``-s``) option specifies the BAR offset for qspips, default is 0x40000. + +- The ``--length`` (or ``-l``) option specifies the length to erase, default is 96MB. + +- The ``--image`` (or ``-i``) option specifies the boot.bin image path to update the persistent device. 
 + +- The ``--erase`` (or ``-e``) command is used to erase the flash on the device. + + +**Example commands** + + +.. code-block:: shell + + #Program the boot.bin image. + xbflash2 program --qspips --device 0000:3b:00.0 --image + + #Program the boot.bin image. + xbflash2 program --qspips --device 0000:3b:00.0 --image --offset 0x0 --bar-offset 0x10000 --bar 0 + + #Erase flash on the device + xbflash2 program --qspips --device 0000:d8:00.0 --erase --length 0x06000000 --offset 0x0 --bar 0 --bar-offset 0x40000 + + +xbflash2 dump +~~~~~~~~~~~~~ + +The ``xbflash2 dump`` command reads the image(s) for a given device for a given length and outputs the same to a given file. It is applicable only for the QSPIPS flash. + +**The supported options** + +Reads the image(s) for a given device and dumps out the content of the specified option. + +.. code-block:: shell + + xbflash2 dump [--help|-h] --[ qspips ] [commandArgs] + +**The details of the supported options** + +- The ``--help`` (or ``-h``) option gets the help message for this sub-command. +- The ``--qspips`` option is used for the qspips flash type. + + +xbflash2 dump --qspips +~~~~~~~~~~~~~~~~~~~~~~ + +The ``xbflash2 dump --qspips`` command dumps out content to the given output file. + +**The supported use cases and their options** + +Reads the image(s) for a given device for a given length and outputs the same to a given file. + +.. code-block:: shell + + xbflash2 dump --qspips [--device|-d] [--offset|-a] [--length|-l] [--flash-part|-p] [--bar|-b] [--bar-offset|-s] [--output|-o] + +**The details of the supported options** + +- The ``--device`` (or ``-d``) option specifies the target device. + + - : The Bus:Device.Function of the device of interest + +- The ``--offset`` (or ``-a``) option specifies the offset on flash to start at, default is 0. + +- The ``--length`` (or ``-l``) option specifies the length to read, default is 128MB. + +- The ``--flash-part`` (or ``-p``) option specifies the qspips flash type, default is qspi_ps_x2_single. + +- The ``--bar`` (or ``-b``) option specifies the BAR index for qspips, default is 0. + +- The ``--bar-offset`` (or ``-s``) option specifies the BAR offset for qspips, default is 0x40000. + +- The ``--output`` (or ``-o``) option specifies the output file path to save the read contents. + + +**Example commands** + + +.. code-block:: shell + + + #Dump out content to the given output file + xbflash2 dump --qspips --device 0000:3b:00.0 --offset 0x0 --length 0x08000000 --bar-offset 0x10000 --bar 0 --output /tmp/flash_dump.txt + diff --git a/2024.2/html/_sources/xbmgmt.rst.txt b/2024.2/html/_sources/xbmgmt.rst.txt new file mode 100644 index 00000000000..3c6396b3045 --- /dev/null +++ b/2024.2/html/_sources/xbmgmt.rst.txt @@ -0,0 +1,322 @@ +.. _xbmgmt.rst: + +.. + comment:: SPDX-License-Identifier: Apache-2.0 + comment:: Copyright (C) 2019-2022 Xilinx, Inc. All rights reserved. + +xbmgmt +====== + +This document describes the latest ``xbmgmt`` commands. These commands are the default from the 21.1 release. + + + +For an instructive video on the xbmgmt commands listed below, click `here `_. + +**Global options**: These global options can be used with any command. 
 + + - ``--verbose``: Turn on verbosity and show more output whenever applicable + - ``--batch``: Enable batch mode + - ``--force``: When possible, force an operation + - ``--help`` : Get help message + - ``--version`` : Report the version of XRT and its drivers + +Currently supported ``xbmgmt`` commands are + + - ``xbmgmt configure`` + - ``xbmgmt dump`` + - ``xbmgmt examine`` + - ``xbmgmt program`` + - ``xbmgmt reset`` + +**Note**: For applicable commands, if only one device is present on the system, ``--device`` (or ``-d``) is not required. If more than one device is present in the system, ``--device`` (or ``-d``) is required. + + +xbmgmt configure +~~~~~~~~~~~~~~~~ + +The ``xbmgmt configure`` command provides advanced options for configuring a device's memory, clock, and DDR memory retention settings. A .ini file input is required for configuration except for DDR memory retention. The .ini file can be located in a directory of your choosing, with the path specified in the command. + +- TIP: Instead of creating a .ini file from scratch, use ``xbmgmt dump --config`` (see the ``xbmgmt dump`` section) to generate the file contents, which can be edited accordingly. + +**Command Options** + +The supported options are ``--input`` and ``--retention``. Command usage is below. + +.. code-block:: shell + + xbmgmt configure [--device| -d] [--input] + + +Enabling/Disabling clock throttling on a device + +- When enabled, clock throttling reduces the kernel clock frequency dynamically when either thermal or electrical sensors exceed defined threshold values. By lowering the clock frequency, clock throttling reduces the required power and subsequently generated heat. Only when all sensor values fall below their respective clock throttling threshold values will the kernel clock be restored to full performance. +- Default clock throttling threshold values are available in ` `_ for supported platforms. +- The contents of the .ini file for clock throttling configuration should be similar to the example provided below. Underneath the first line, ``[Device]``, specify one or more key-value pairings as needed. + +.. code-block:: ini + + [Device] + throttling_enabled=true + throttling_power_override=200 + throttling_temp_override=90 + +- The definitions of the three key-value pairings are given below. + + - ``throttling_enabled`` : When set to ``true``, clock throttling will be enabled. When set to ``false``, clock throttling will be disabled, and no clock throttling will occur. The default value is ``false``. + - ``throttling_power_override`` : Provide a power threshold override in watts for clock throttling to activate. The default threshold value is given in ` `_. + - ``throttling_temp_override`` : Provide a temperature threshold override in Celsius for clock throttling to activate. The default threshold value is given in ` `_. + +- If a pairing is not listed in the .ini file, the default value (or the updated value from previous usage of ``xbmgmt configure --input``) is used. +- Thresholds can be set higher or lower as necessary (e.g., for debugging purposes). Note that cards still have built-in card and clock shutdown logic with independent thresholds to protect the cards. +- To check clock throttling settings, use ``xbmgmt examine`` with the ``cmc`` report. + + +Enabling/Disabling DDR memory retention on a device + +.. 
code-block:: shell + + xbmgmt configure [--device| -d] --retention [ENABLE|DISABLE] + + +**The details of the supported options** + +- The ``--device`` (or ``-d``) specifies the target device + + - : The Bus:Device.Function of the device of interest + + +- The ``--input`` option specifies an INI file with configuration details (e.g., memory, clock throttling). +- The ``--retention`` option enables/disables DDR memory retention. + + +**Example commands** + + +.. code-block:: shell + + + #Configure a device's memory settings using an image + xbmgmt configure --device 0000:b3:00.0 --input /tmp/memory_config.ini + + #Configure a device using edited output .ini from xbmgmt dump --config + xbmgmt configure --device 0000:b3:00.0 --input /tmp/config.ini + + #Enable a device's DDR memory retention + xbmgmt configure --device 0000:b3:00.0 --retention ENABLE + + +xbmgmt dump +~~~~~~~~~~~ + +The ``xbmgmt dump`` command dumps out the content of the specified option. + +**The supported options** + +Dumping the output of the system configuration. + +.. code-block:: shell + + xbmgmt dump [--device| -d] [--config| -c] [--output| -o] + + +Dumping the output of the programmed system image. + +.. code-block:: shell + + xbmgmt dump [--device| -d] [--flash| -f] [--output| -o] + + +**The details of the supported options** + +- The ``--device`` (or ``-d``) specifies the target device + + - : The Bus:Device.Function of the device of interest + + +- The ``--flash`` (or ``-f``) option dumps the output of the programmed system image. It requires a .bin output file specified with the ``-o`` option. +- The ``--config`` (or ``-c``) option dumps the output of the system configuration. It requires a .ini output file specified with the ``-o`` option. +- The ``--output`` (or ``-o``) option specifies the output file to direct the dumped output to. + + +**Example commands** + + +.. code-block:: shell + + + #Dump programmed system image data + xbmgmt dump --device 0000:b3:00.0 --flash -o /tmp/flash_dump.bin + + #Dump system configuration. This .ini file can be edited and used as input for xbmgmt configure. + xbmgmt dump --device 0000:b3:00.0 --config -o /tmp/config_dump.ini + + #Example .ini file contents from xbmgmt dump --config. + #Only edit the throttling_enabled, throttling_power_override, and throttling_temp_override values when editing clock throttling settings. + [Device] + mailbox_channel_disable=0x0 + mailbox_channel_switch=0x0 + xclbin_change=0 + cache_xclbin=0 + throttling_enabled=true + throttling_power_override=200 + throttling_temp_override=90 + + +xbmgmt examine +~~~~~~~~~~~~~~ + +The ``xbmgmt examine`` command reports detailed status information of the specified device `