@@ -28,52 +28,6 @@ void set_placer_breakpoint_reached(bool flag) {
     f_placer_breakpoint_reached = flag;
 }
 
-/**
- * @brief Adjust the search range based on the block type and constraints
- *
- * If the block is an IO block, we expand the search range to include all blocks in the column.
- * We found empirically that this is a good strategy for IO blocks, given that they are located
- * in the periphery for most FPGA architectures.
- *
- * @param block_type The type of the block to move
- * @param block_id The block ID of the moving block
- * @param search_range The search range to adjust
- * @param delta_cx The delta x of the search range
- * @param to_layer_num The layer that the block is moving to
- *
- * @return true if the search range was adjusted, false otherwise
- */
-static bool adjust_search_range(t_logical_block_type_ptr block_type,
-                                ClusterBlockId block_id,
-                                t_bb& search_range,
-                                int& delta_cx,
-                                int to_layer_num) {
-
-    auto block_constrained = is_cluster_constrained(block_id);
-
-    if (block_constrained) {
-        bool intersect = intersect_range_limit_with_floorplan_constraints(block_id,
-                                                                          search_range,
-                                                                          delta_cx,
-                                                                          to_layer_num);
-        if (!intersect) {
-            return false;
-        }
-    }
-
-    if (is_io_type(block_type) && !block_constrained) {
-        /* We found empirically that for IO blocks,
-         * given their sparsity, expanding the y-axis search range
-         * to include all blocks in the column is beneficial.
-         */
-        const t_compressed_block_grid& compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[block_type->index];
-        search_range.ymin = 0;
-        search_range.ymax = compressed_block_grid.get_num_rows(to_layer_num) - 1;
-    }
-
-    return true;
-}
-
 e_create_move create_move(t_pl_blocks_to_be_moved& blocks_affected,
                           ClusterBlockId b_from,
                           t_pl_loc to,
@@ -1222,6 +1176,37 @@ bool intersect_range_limit_with_floorplan_constraints(ClusterBlockId b_from,
     return true;
 }
 
+bool adjust_search_range(t_logical_block_type_ptr block_type,
+                         ClusterBlockId block_id,
+                         t_bb& search_range,
+                         int& delta_cx,
+                         int to_layer_num) {
+
+    auto block_constrained = is_cluster_constrained(block_id);
+
+    if (block_constrained) {
+        bool intersect = intersect_range_limit_with_floorplan_constraints(block_id,
+                                                                          search_range,
+                                                                          delta_cx,
+                                                                          to_layer_num);
+        if (!intersect) {
+            return false;
+        }
+    }
+
+    if (is_io_type(block_type) && !block_constrained) {
+        /* We found empirically that for IO blocks,
+         * given their sparsity, expanding the y-axis search range
+         * to include all blocks in the column is beneficial.
+         */
+        const t_compressed_block_grid& compressed_block_grid = g_vpr_ctx.placement().compressed_block_grids[block_type->index];
+        search_range.ymin = 0;
+        search_range.ymax = compressed_block_grid.get_num_rows(to_layer_num) - 1;
+    }
+
+    return true;
+}
+
 std::string e_move_result_to_string(e_move_result move_outcome) {
     switch (move_outcome) {
         case e_move_result::REJECTED:
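
Below is a minimal, self-contained sketch of the search-range adjustment idea implemented by adjust_search_range above. It is not VPR code: SearchRange, intersect_with_constraint, and adjust_search_range_sketch are simplified stand-ins for t_bb, intersect_range_limit_with_floorplan_constraints, and the real function, which operate on the compressed block grid and floorplan constraints.

// Standalone illustration with assumed names; not part of the VPR codebase.
#include <algorithm>
#include <iostream>
#include <optional>

struct SearchRange {
    int xmin, xmax, ymin, ymax;
};

// Clip the range to a constraint box and report whether anything is left
// (mirrors the role of intersect_range_limit_with_floorplan_constraints).
static bool intersect_with_constraint(SearchRange& range, const SearchRange& box) {
    range.xmin = std::max(range.xmin, box.xmin);
    range.xmax = std::min(range.xmax, box.xmax);
    range.ymin = std::max(range.ymin, box.ymin);
    range.ymax = std::min(range.ymax, box.ymax);
    return range.xmin <= range.xmax && range.ymin <= range.ymax;
}

// Simplified adjust_search_range: constrained blocks are clipped to their
// floorplan region; unconstrained IO blocks get the full column height.
static bool adjust_search_range_sketch(bool is_io_block,
                                       const std::optional<SearchRange>& constraint,
                                       SearchRange& range,
                                       int num_rows) {
    if (constraint) {
        return intersect_with_constraint(range, *constraint);
    }
    if (is_io_block) {
        range.ymin = 0;             // expand the y-axis range to the whole column
        range.ymax = num_rows - 1;
    }
    return true;
}

int main() {
    SearchRange range{3, 5, 10, 12};
    if (adjust_search_range_sketch(/*is_io_block=*/true, std::nullopt, range, /*num_rows=*/40)) {
        std::cout << "y search range: " << range.ymin << ".." << range.ymax << "\n"; // prints 0..39
    }
    return 0;
}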