 #include "timing_reports.h"
 
+#include <fstream>
+#include <sstream>
+
+#include "rr_graph.h"
+
 #include "tatum/TimingReporter.hpp"
 
+#include "vtr_version.h"
 #include "vpr_types.h"
 #include "globals.h"
 
 
 #include "VprTimingGraphResolver.h"
 
+/**
+ * @brief Get the bounding box of a routed net.
+ *
+ * If the net is completely absorbed into a cluster block, the bounding box of
+ * that cluster block is returned. Otherwise, the bounding box of the net's
+ * route tree is returned.
+ *
+ * @param atom_net_id The id of the atom net to get the bounding box of.
+ *
+ * @return The bounding box of the net. If the net is not routed, a bounding
+ *         box with default values (OPEN) is returned.
+ */
+static t_bb get_net_bounding_box(const AtomNetId atom_net_id) {
+    const auto& route_trees = g_vpr_ctx.routing().route_trees;
+    const auto& rr_graph = g_vpr_ctx.device().rr_graph;
+
+    // Lambda to get the bounding box of a route tree
+    auto route_tree_bb = [&](const RouteTree& route_tree) {
+        t_bb bb;
+
+        // Initialize the bounding box to the root node's location
+        RRNodeId route_tree_root = route_tree.root().inode;
+        bb.xmin = rr_graph.node_xlow(route_tree_root);
+        bb.xmax = rr_graph.node_xhigh(route_tree_root);
+        bb.ymin = rr_graph.node_ylow(route_tree_root);
+        bb.ymax = rr_graph.node_yhigh(route_tree_root);
+        bb.layer_min = rr_graph.node_layer(route_tree_root);
+        bb.layer_max = rr_graph.node_layer(route_tree_root);
+
+        // Expand the bounding box to cover every node in the route tree
+        for (auto& rt_node : route_tree.all_nodes()) {
+            RRNodeId inode = rt_node.inode;
+
+            bb.xmin = std::min(static_cast<int>(rr_graph.node_xlow(inode)), bb.xmin);
+            bb.xmax = std::max(static_cast<int>(rr_graph.node_xhigh(inode)), bb.xmax);
+
+            bb.ymin = std::min(static_cast<int>(rr_graph.node_ylow(inode)), bb.ymin);
+            bb.ymax = std::max(static_cast<int>(rr_graph.node_yhigh(inode)), bb.ymax);
+
+            bb.layer_min = std::min(static_cast<int>(rr_graph.node_layer(inode)), bb.layer_min);
+            bb.layer_max = std::max(static_cast<int>(rr_graph.node_layer(inode)), bb.layer_max);
+        }
+        return bb;
+    };
+
+    if (g_vpr_ctx.routing().is_flat) {
+        // If the flat router was used, the route tree data structure can be
+        // used directly to get the bounding box of the net
+        const auto& route_tree = route_trees[atom_net_id];
+        if (!route_tree)
+            return t_bb();
+        return route_tree_bb(*route_tree);
+    } else {
+        // If the two-stage router was used, we first need to get the cluster
+        // net ids corresponding to the atom net, then compute the union of the
+        // bounding boxes of their route trees. If the net is completely
+        // absorbed into a cluster block, we use the cluster block's bounding
+        // box instead.
+        const auto& atom_lookup = g_vpr_ctx.atom().lookup();
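+        // clb_nets() returns vtr::nullopt when the atom net has no corresponding cluster nets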
+        const auto& cluster_net_id = atom_lookup.clb_nets(atom_net_id);
+        std::vector<t_bb> bbs;
+        t_bb max_bb;
+        // There may be multiple cluster nets corresponding to a single atom
+        // net. We iterate over all cluster nets; the final bounding box is the
+        // union of all cluster net bounding boxes.
+        if (cluster_net_id != vtr::nullopt) {
+            for (const auto& clb_net_id : *cluster_net_id) {
+                const auto& route_tree = route_trees[clb_net_id];
+                if (!route_tree)
+                    continue;
+                bbs.push_back(route_tree_bb(*route_tree));
+            }
+            if (bbs.empty()) {
+                return t_bb();
+            }
+            // Assign the first cluster net's bounding box to the final
+            // bounding box, then iteratively grow it to the union of all
+            // cluster net bounding boxes
+            max_bb = bbs[0];
+            for (size_t i = 1; i < bbs.size(); ++i) {
+                max_bb.xmin = std::min(bbs[i].xmin, max_bb.xmin);
+                max_bb.xmax = std::max(bbs[i].xmax, max_bb.xmax);
+                max_bb.ymin = std::min(bbs[i].ymin, max_bb.ymin);
+                max_bb.ymax = std::max(bbs[i].ymax, max_bb.ymax);
+                max_bb.layer_min = std::min(bbs[i].layer_min, max_bb.layer_min);
+                max_bb.layer_max = std::max(bbs[i].layer_max, max_bb.layer_max);
+            }
+            return max_bb;
+        } else {
+            // If there is no cluster net corresponding to the atom net, the
+            // net is completely absorbed into a cluster block. In that case,
+            // we set the bounding box to the cluster block's bounding box.
+            const auto& atom_ctx = g_vpr_ctx.atom();
+            const auto& atom_nlist = atom_ctx.netlist();
+            AtomPinId source_pin = atom_nlist.net_driver(atom_net_id);
+
+            AtomBlockId atom_block = atom_nlist.pin_block(source_pin);
+            VTR_ASSERT(atom_block != AtomBlockId::INVALID());
+            ClusterBlockId cluster_block = atom_lookup.atom_clb(atom_block);
+            VTR_ASSERT(cluster_block != ClusterBlockId::INVALID());
+
+            const t_pl_loc& cluster_block_loc = g_vpr_ctx.placement().block_locs()[cluster_block].loc;
+            const auto& grid = g_vpr_ctx.device().grid;
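+            // Use the footprint of the physical tile the cluster block occupies as the net's bounding box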
+            vtr::Rect<int> tile_bb = grid.get_tile_bb({cluster_block_loc.x, cluster_block_loc.y, cluster_block_loc.layer});
+            const int block_layer = cluster_block_loc.layer;
+            return t_bb(tile_bb.xmin(),
+                        tile_bb.xmax(),
+                        tile_bb.ymin(),
+                        tile_bb.ymax(),
+                        block_layer,
+                        block_layer);
+        }
+    }
+}
+
 void generate_setup_timing_stats(const std::string& prefix,
                                  const SetupTimingInfo& timing_info,
                                  const AnalysisDelayCalculator& delay_calc,
@@ -61,3 +179,55 @@ void generate_hold_timing_stats(const std::string& prefix,
 
     timing_reporter.report_unconstrained_hold(prefix + "report_unconstrained_timing.hold.rpt", *timing_info.hold_analyzer());
 }
+
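+/**
+ * @brief Generate a CSV report of per-net timing, written to
+ *        <prefix>report_net_timing.csv.
+ *
+ * Each row describes one atom net: its name, fanout, routed bounding box,
+ * source pin name and setup slack, followed by a quoted, semicolon-delimited
+ * list of sinks, each formatted as name,slack,delay. For example, a row may
+ * look like (illustrative values):
+ *   "net_a",2,10,12,0,15,14,0,"blk_a.out[0]",0.5,"blk_b.in[0],0.4,0.1;blk_c.in[1],0.6,0.2"
+ */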
+void generate_net_timing_report(const std::string& prefix,
+                                const SetupHoldTimingInfo& timing_info,
+                                const AnalysisDelayCalculator& delay_calc) {
+    std::ofstream os(prefix + "report_net_timing.csv");
+    const auto& atom_netlist = g_vpr_ctx.atom().netlist();
+    const auto& atom_lookup = g_vpr_ctx.atom().lookup();
+    const auto& timing_ctx = g_vpr_ctx.timing();
+    const auto& timing_graph = timing_ctx.graph;
+
+    // Write the CSV header row
+    os << "netname,Fanout,bb_xmin,bb_ymin,bb_layer_min,"
+       << "bb_xmax,bb_ymax,bb_layer_max,"
+       << "src_pin_name,src_pin_slack,sinks" << std::endl;
+
+    for (const auto& net : atom_netlist.nets()) {
+        const auto& net_name = atom_netlist.net_name(net);
+        const auto& source_pin = *atom_netlist.net_pins(net).begin();
+        // For the driver/source pin, this is the worst slack to any fanout
+        auto source_pin_slack = timing_info.setup_pin_slack(source_pin);
+        auto tg_source_node = atom_lookup.atom_pin_tnode(source_pin);
+        VTR_ASSERT(tg_source_node.is_valid());
+
+        const size_t fanout = atom_netlist.net_sinks(net).size();
+        const auto& net_bb = get_net_bounding_box(net);
+
+        os << "\"" << net_name << "\"," // netname (quoted for safety)
+           << fanout << ","
+           << net_bb.xmin << "," << net_bb.ymin << "," << net_bb.layer_min << ","
+           << net_bb.xmax << "," << net_bb.ymax << "," << net_bb.layer_max << ","
+           << "\"" << atom_netlist.pin_name(source_pin) << "\"," << source_pin_slack << ",";
+
+        // Write the sinks column (quoted, semicolon-delimited; each sink as name,slack,delay)
+        os << "\"";
+        for (size_t i = 0; i < fanout; ++i) {
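+            // net_pins() lists the driver pin first, so sink i is at offset i + 1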
+            const auto& pin = *(atom_netlist.net_pins(net).begin() + i + 1);
+            auto tg_sink_node = atom_lookup.atom_pin_tnode(pin);
+            VTR_ASSERT(tg_sink_node.is_valid());
+
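+            // The timing graph edge from the driver tnode to this sink tnode
+            // models the connection; its max delay is the delay reported below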
+            auto tg_edge_id = timing_graph->find_edge(tg_source_node, tg_sink_node);
+            VTR_ASSERT(tg_edge_id.is_valid());
+
+            auto pin_setup_slack = timing_info.setup_pin_slack(pin);
+            auto pin_delay = delay_calc.max_edge_delay(*timing_graph, tg_edge_id);
+            const auto& pin_name = atom_netlist.pin_name(pin);
+
+            os << pin_name << "," << pin_setup_slack << "," << pin_delay;
+            if (i != fanout - 1) os << ";";
+        }
+        os << "\"" << std::endl; // Close the quoted sinks field and finish the row
+    }
+}