@@ -55,6 +55,7 @@ tab-size = 4
5555#include < ranges>
5656#include < string>
5757#include < unordered_set>
58+ #include < cstring>
5859
5960#include < fmt/format.h>
6061
@@ -124,14 +125,17 @@ namespace Mem {
124125 };
125126
126127namespace Gpu {
127- // List of all GPU information
128+ // ? List of all GPU information
128129 std::vector<gpu_info> gpus;
129130#ifdef GPU_SUPPORT
130131 namespace Agx {
131132 bool initialized = false ;
133+
132134 size_t device_count = 0 ;
133- IOGPU io_gpu;
134- bool init ();
135+ // ? io_gpu holds the vector with the list of GPUs and their data used to perform the query.
136+ IOGPU io_gpu;
137+
138+ bool init ();
135139 bool shutdown ();
136140 template <bool is_init>
137141 bool collect (gpu_info* gpus_slice, size_t i = 0 );
@@ -1473,11 +1477,8 @@ namespace Tools {
14731477namespace Gpu {
14741478
14751479#ifdef GPU_SUPPORT
1476- // ----------------------------------------
1477- // Apple Silicon specific GPU handling
1478- // ----------------------------------------
14791480 namespace Agx {
1480- // Initialize Apple Silicon GPU monitoring
1481+ // ? Initialize Apple Silicon GPU monitoring. Although these chips always have 1 GPU, I assume we can reuse this later on Intel Macs.
14811482 bool init () {
14821483 const size_t index = gpus.size ();
14831484 auto & io_gpus = io_gpu.getGPUs ();
@@ -1496,13 +1497,13 @@ namespace Gpu {
14961497 return true ;
14971498 }
14981499
1499- // Shutdown Apple Silicon GPU monitoring
1500+ // ? Shutdown Apple Silicon GPU monitoring
15001501 bool shutdown () {
15011502 initialized = false ;
15021503 return true ;
15031504 }
15041505
1505- // Collect GPU metrics into the provided slice
1506+ // ? Collect GPU metrics into the provided slice
15061507 template <bool is_init>
15071508 bool collect (gpu_info* gpus_slice, size_t index) {
15081509 if (!initialized)
@@ -1512,20 +1513,18 @@ namespace Gpu {
15121513 gpus_slice->supported_functions = {
15131514 .gpu_utilization = true ,
15141515 .mem_utilization = false ,
1515- .mem_total = true ,
1516- .mem_used = true ,
1517-
1518- .pwr_usage = IOReport::LibHandle ? true : false ,
15191516 .gpu_clock = IOReport::LibHandle ? true : false ,
1520- .temp_info = false , // IOReport::LibHandle ? true : false,
1521-
15221517 .mem_clock = false ,
1518+ .pwr_usage = IOReport::LibHandle ? true : false ,
15231519 .pwr_state = false ,
1520+ .temp_info = false , // IOReport::LibHandle ? true : false
1521+ .mem_total = true ,
1522+ .mem_used = true ,
15241523 .pcie_txrx = false ,
15251524 .encoder_utilization = false ,
1526- .decoder_utilization = false
1525+ .decoder_utilization = false
15271526 };
1528- gpus_slice->pwr_max_usage = 30'000 ;
1527+ gpus_slice->pwr_max_usage = 30'000 ; // ? 30w
15291528 }
15301529
15311530 auto & io_gpus = io_gpu.getGPUs ();
@@ -1561,10 +1560,8 @@ namespace Gpu {
15611560
15621561 } // namespace Agx
15631562
1564- // ----------------------------------------
1565- // Collect GPU metrics (top-level function)
1566- // ----------------------------------------
1567- // Full based on linux (intel/amd) gpu collect
1563+
1564+ // ? Fully based on (copied from) the Linux (Intel/AMD) GPU collect
15681565 auto collect (bool no_update) -> std::vector<gpu_info>& {
15691566 if (Runner::stopping || (no_update && !gpus.empty ()))
15701567 return gpus;
0 commit comments