
Commit 9d8ffa5 ("fix CI")
1 parent: 4c02664

3 files changed: +7, -9 lines

models/qwen.cpp: 4 additions, 6 deletions

@@ -1259,7 +1259,7 @@ namespace chatllm::qwen::vit
         :
         GRAPH_SIZE(GRAPH_SIZE), _ctx(&backend_context),
         n_threads(runtime_config.n_threads),
-        vis_config(0),
+        vis_config(),
         max_patches(max_patches)
     {
         _ctx.cache_dtype = runtime_config.cache_type;
@@ -1425,8 +1425,6 @@ namespace chatllm::qwen::v2_5_vl
         ids.push_back(vision_end_token_id);
     }
 
-    static BlockParams::PadEmbedding *pad_arg = nullptr;
-
     struct ImageGridSize
     {
         int w, h;
@@ -1440,8 +1438,8 @@ namespace chatllm::qwen::v2_5_vl
     {
     public:
         TensorPosHelper3D(int max_length, int image_id_start, ConditionalGeneration *gen)
-            : original_length(max_length), image_id_start(image_id_start),
-              BaseTensorPosHelper(max_length * 4),
+            : BaseTensorPosHelper(max_length * 4),
+              original_length(max_length), image_id_start(image_id_start),
              gen(gen)
         {
         }
@@ -1598,7 +1596,7 @@ namespace chatllm::qwen::v2_5_vl
                 continue;
             }
 
-            CHATLLM_CHECK(mm_index < images_grid.size());
+            CHATLLM_CHECK(mm_index < (int)images_grid.size());
 
             auto &dim = images_grid[mm_index++];
             for (int f = 0; f < dim.frame_num; f++, t += token_n_inc)
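
Taken together, these four hunks read as warning cleanup rather than functional change: vis_config(0) becomes default construction, the unused static pad_arg is dropped, a mem-initializer list is reordered so the base class comes first, and a signed/unsigned comparison gains an explicit cast. Below is a minimal sketch of that last warning class; the code is hypothetical, and treating -Wsign-compare as fatal under -Werror is an assumption suggested (but not confirmed) by the "fix CI" message.

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> images_grid = {3, 4, 5};  // stand-in data
    int mm_index = 0;                          // signed index, as in the diff

    // g++ -Wall warns here: comparison of integer expressions of
    // different signedness: 'int' and 'std::vector<int>::size_type'
    // if (mm_index < images_grid.size()) { ... }

    // The commit's fix: cast the unsigned size() to int before
    // comparing, keeping the bounds check's intent intact.
    if (mm_index < (int)images_grid.size())
        std::printf("mm_index %d is within images_grid\n", mm_index);
    return 0;
}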

src/layers.cpp: 1 addition, 1 deletion

@@ -2500,10 +2500,10 @@ namespace chatllm
         num_local_experts(num_local_experts), num_experts_per_tok(num_experts_per_tok),
         gate(ctx, hidden_size, num_local_experts, gate_use_bias),
         mover(new CPUMover(ctx, ctx->user_options.moe_on_cpu)),
-        experts(ctx, hidden_size, intermediate_size, num_local_experts, num_experts_per_tok, act, experts_use_bias),
         gate_score_correction_bias(gate_score_use_bias ? ggml::new_tensor_1d(ctx, GGML_TYPE_F32, num_local_experts) : nullptr),
         group_indices(grouped_max ? ggml::new_tensor_2d(ctx, GGML_TYPE_I32, 1, num_experts_per_tok) : nullptr),
         router_scale(router_scale ? ggml::new_tensor_1d(ctx, GGML_TYPE_F32, num_local_experts) : nullptr),
+        experts(ctx, hidden_size, intermediate_size, num_local_experts, num_experts_per_tok, act, experts_use_bias),
         norm_topk_prob(true),
         score_func(ScoreFunc::Softmax),
         routed_scaling_factor(-1.0f),
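
The only change here is moving the experts(...) initializer below router_scale(...). C++ initializes members in the order they are declared in the class, not in the order the mem-initializer list writes them, so this reorder cannot change behavior; most likely it quiets a -Wreorder warning (an assumption, since the class declaration is not part of this diff). A minimal sketch with hypothetical names:

#include <vector>

struct MoELayer                    // hypothetical stand-in class
{
    int num_experts;               // declared first
    std::vector<int> expert_ids;   // declared second

    // g++ -Wall warns: 'MoELayer::expert_ids' will be initialized
    // after 'int MoELayer::num_experts' [-Wreorder]
    // MoELayer(int n) : expert_ids(n), num_experts(n) {}

    // Fixed as in the commit: write the list in declaration order.
    MoELayer(int n) : num_experts(n), expert_ids(n) {}
};

int main()
{
    MoELayer layer(8);
    return layer.num_experts == 8 ? 0 : 1;
}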

src/layers.h: 2 additions, 2 deletions

@@ -1272,10 +1272,10 @@ namespace chatllm
         attn_scaling(true),
         causal(true),
         last_attn_scores(nullptr),
-        pos_helper(helper ? helper : &def_pos_helper),
         sinks(BlockParams::CoreAttentionUseSinks::get() > 0 ?
             ggml::new_tensor_1d(ctx, ggml::type::GGML_TYPE_F32, BlockParams::CoreAttentionUseSinks::get())
-            : nullptr)
+            : nullptr),
+        pos_helper(helper ? helper : &def_pos_helper)
     {
         allocate_pos_tensor(ctx);
     }
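
This is the same initializer-order cleanup as in src/layers.cpp: pos_helper moves after sinks, presumably to match the members' declaration order in the class, with the trailing comma shifted onto the sinks initializer accordingly. Because initialization always follows declaration order, the reorder is behavior-neutral; its value is silencing the -Wreorder diagnostic that appears to have been failing the CI build.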
