136 changes: 78 additions & 58 deletions common/service/host/kcs.c
@@ -302,9 +302,8 @@ static void kcs_add_sel_handler(struct k_work *kcs_pldm_work)
ext_params.smbus_ext_params.addr = I2C_ADDR_BMC;
ext_params.ep = MCTP_EID_BMC;
}
pldm_platform_event_message_req(find_mctp_by_bus(bmc_bus),
ext_params, 0xFB, &kcs_work->ibuf[4],
pldm_event_length);
pldm_platform_event_message_req(find_mctp_by_bus(bmc_bus), ext_params, 0xFB,
&kcs_work->ibuf[4], pldm_event_length);

k_mutex_lock(&mutex_use_count, K_MSEC(1000));
work_count--;
@@ -317,7 +316,7 @@ static void kcs_crash_dump_handler(struct k_work *kcs_pldm_work)
struct kcs_work_info *kcs_work = CONTAINER_OF(kcs_pldm_work, struct kcs_work_info, work);
LOG_HEXDUMP_DBG(&kcs_work->ibuf[0], kcs_work->data_length, "host KCS read dump data:");
int ret = send_crashdump_to_bmc(&kcs_work->ibuf[2],
(kcs_work->data_length - 2)); // exclude netfn, cmd
(kcs_work->data_length - 2)); // exclude netfn, cmd
if (ret) {
LOG_ERR("Failed to send crashdump data to BMC, rc = %d", ret);
}
@@ -372,11 +371,8 @@ static void kcs_bios_fw_version_handler(struct k_work *kcs_pldm_work)
SAFE_FREE(kcs_work);
}

static int schedule_kcs_work_handle(k_work_handler_t handler,
uint8_t *kcs_buff,
kcs_dev *kcs_inst,
uint8_t *buf,
int data_length)
static int schedule_kcs_work_handle(k_work_handler_t handler, uint8_t *kcs_buff,
const kcs_dev *kcs_inst, const uint8_t *buf, int data_length)
{
// BIC return C3 to BIOS when kcs work queue is full or schedule work fail
// do next loop and waiting for BIOS retry
@@ -393,17 +389,13 @@ static int schedule_kcs_work_handle(k_work_handler_t handler,
LOG_ERR("kcs_work queue is full, work count = %d", work_count);
kcs_buff[2] = CC_TIMEOUT;
kcs_write(kcs_inst->index, kcs_buff, 3);
SAFE_FREE(kcs_work);
SAFE_FREE(kcs_buff);
return -1;
goto error;
}
// POST Start/End send event log to BMC only if kcs_write was successful
if (((kcs_work->ibuf[0] == NETFN_OEM_REQ) && (kcs_work->ibuf[1] == CMD_OEM_POST_START)) ||
((kcs_work->ibuf[0] == NETFN_OEM_REQ) && (kcs_work->ibuf[1] == CMD_OEM_POST_END))){
((kcs_work->ibuf[0] == NETFN_OEM_REQ) && (kcs_work->ibuf[1] == CMD_OEM_POST_END))) {
if (kcs_write(kcs_inst->index, kcs_buff, 3) != 0) {
SAFE_FREE(kcs_work);
SAFE_FREE(kcs_buff);
return -1;
goto error;
}
}
// put work in kcs work queue
@@ -412,14 +404,17 @@ static int schedule_kcs_work_handle(k_work_handler_t handler,
LOG_ERR("Failed to schedule work, netfn = %x, cmd = %x", kcs_buff[0], kcs_buff[1]);
kcs_buff[2] = CC_TIMEOUT;
kcs_write(kcs_inst->index, kcs_buff, 3);
SAFE_FREE(kcs_work);
SAFE_FREE(kcs_buff);
return -1;
goto error;
}
k_mutex_lock(&mutex_use_count, K_MSEC(1000));
work_count++;
k_mutex_unlock(&mutex_use_count);
return 0;

error:
SAFE_FREE(kcs_work);
SAFE_FREE(kcs_buff);
return -1;
}

#endif
@@ -434,7 +429,6 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
ipmb_error status;
#endif
ipmi_msg_cfg current_msg;
struct kcs_request *req;

ARG_UNUSED(arvg1);
ARG_UNUSED(arvg2);
@@ -457,13 +451,14 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
LOG_HEXDUMP_DBG(&ibuf[0], rc, "host KCS read dump data:");

proc_kcs_ok = true;
req = (struct kcs_request *)ibuf;
req->netfn = req->netfn >> 2;
const struct kcs_request *req = (struct kcs_request *)ibuf;
const uint8_t netfn_no_lun = req->netfn >> 2;

if (pal_request_msg_to_BIC_from_HOST(
req->netfn, req->cmd)) { // In-band update command, not bridging to bmc
netfn_no_lun,
req->cmd)) { // In-band update command, not bridging to bmc
current_msg.buffer.InF_source = HOST_KCS_1 + kcs_inst->index;
current_msg.buffer.netfn = req->netfn;
current_msg.buffer.netfn = netfn_no_lun;
current_msg.buffer.cmd = req->cmd;
current_msg.buffer.data_len = rc - 2; // exclude netfn, cmd
if (current_msg.buffer.data_len != 0) {
@@ -482,23 +477,27 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
LOG_ERR("Failed to malloc for kcs_buff");
continue;
}
kcs_buff[0] = (req->netfn | BIT(0)) << 2;
kcs_buff[0] = (netfn_no_lun | BIT(0)) << 2;
kcs_buff[1] = req->cmd;
kcs_buff[2] = CC_SUCCESS;
if (pal_immediate_respond_from_HOST(req->netfn, req->cmd)) {
if (((req->netfn == NETFN_STORAGE_REQ) &&
(req->cmd == CMD_STORAGE_ADD_SEL))) {
if (rc != ADD_SEL_EVENT_DATA_MAX_LEN) { // do not write kcs when data length error
LOG_ERR("ADD SEL event data length error, rc = %d", rc);
if (pal_immediate_respond_from_HOST(netfn_no_lun, req->cmd)) {
if (((netfn_no_lun == NETFN_STORAGE_REQ) &&
(req->cmd == CMD_STORAGE_ADD_SEL))) {
if (rc !=
ADD_SEL_EVENT_DATA_MAX_LEN) { // do not write kcs when data length error
LOG_ERR("ADD SEL event data length error, rc = %d",
rc);
kcs_buff[2] = CC_LENGTH_EXCEEDED;
kcs_write(kcs_inst->index, kcs_buff, 3);
SAFE_FREE(kcs_buff);
continue;
}
kcs_buff[3] = 0x00;
kcs_buff[4] = 0x00;
#ifdef ENABLE_PLDM
#ifndef ENABLE_OEM_PLDM
if (schedule_kcs_work_handle(kcs_add_sel_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
if (schedule_kcs_work_handle(kcs_add_sel_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
continue;
}
#endif
@@ -511,9 +510,9 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
#ifdef ENABLE_PLDM
#ifndef ENABLE_OEM_PLDM
// IPMI OEM command for crashdump, send crashdump data to BMC
if ((req->netfn == NETFN_OEM_REQ) && (req->cmd == CMD_OEM_CRASH_DUMP)) {
if (schedule_kcs_work_handle(kcs_crash_dump_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
if ((netfn_no_lun == NETFN_OEM_REQ) && (req->cmd == CMD_OEM_CRASH_DUMP)) {
if (schedule_kcs_work_handle(kcs_crash_dump_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
continue;
}
if (ibuf[6] == 0x80 && ibuf[10] == 0x01) {
@@ -524,39 +523,47 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
}
}
// IPMI OEM command for POST Start/End. Send SEL to BMC
if (((req->netfn == NETFN_OEM_REQ) && (req->cmd == CMD_OEM_POST_START)) ||
((req->netfn == NETFN_OEM_REQ) && (req->cmd == CMD_OEM_POST_END))) {
if (schedule_kcs_work_handle(kcs_oem_post_start_end_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
if (((netfn_no_lun == NETFN_OEM_REQ) && (req->cmd == CMD_OEM_POST_START)) ||
((netfn_no_lun == NETFN_OEM_REQ) && (req->cmd == CMD_OEM_POST_END))) {
if (schedule_kcs_work_handle(kcs_oem_post_start_end_handler,
kcs_buff, kcs_inst, ibuf, rc) != 0) {
continue;
}
}
#endif
#endif
if ((req->netfn == NETFN_APP_REQ) &&
if ((netfn_no_lun == NETFN_APP_REQ) &&
(req->cmd == CMD_APP_SET_SYS_INFO_PARAMS) &&
(req->data[0] == CMD_SYS_INFO_FW_VERSION)) {
int ret = pal_record_bios_fw_version(ibuf, rc);
if (ret == -1) {
LOG_ERR("Record bios fw version fail");
kcs_buff[2] = CC_UNSPECIFIED_ERROR;
kcs_write(kcs_inst->index, kcs_buff, 3);
SAFE_FREE(kcs_buff);
continue;
}
#ifdef ENABLE_PLDM
#ifndef ENABLE_OEM_PLDM
if (schedule_kcs_work_handle(kcs_bios_fw_version_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
if (schedule_kcs_work_handle(kcs_bios_fw_version_handler, kcs_buff,
kcs_inst, ibuf, rc) != 0) {
continue;
} else {
kcs_write(kcs_inst->index, kcs_buff, 3);
}
#endif
#endif
}
if ((req->netfn == NETFN_OEM_Q_REQ) &&
if ((netfn_no_lun == NETFN_OEM_Q_REQ) &&
(req->cmd == CMD_OEM_Q_SET_DIMM_INFO) &&
(req->data[4] == CMD_DIMM_LOCATION)) {
int ret = pal_set_dimm_presence_status(ibuf);
if (!ret) {
LOG_ERR("Set dimm presence status fail");
kcs_buff[2] = CC_UNSPECIFIED_ERROR;
kcs_write(kcs_inst->index, kcs_buff, 3);
SAFE_FREE(kcs_buff);
continue;
}
}
SAFE_FREE(kcs_buff); // SAFE FREE kcs_buff
@@ -566,7 +573,7 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
bridge_msg.InF_source = HOST_KCS_1 + kcs_inst->index;
bridge_msg.InF_target =
BMC_IPMB; // default bypassing IPMI standard command to BMC
bridge_msg.netfn = req->netfn;
bridge_msg.netfn = netfn_no_lun;
bridge_msg.cmd = req->cmd;
if (bridge_msg.data_len != 0) {
memcpy(&bridge_msg.data[0], &ibuf[2], bridge_msg.data_len);
Expand All @@ -582,25 +589,38 @@ static void kcs_read_task(void *arvg0, void *arvg1, void *arvg2)
LOG_ERR("kcs_read_task send to BMC fail");
}

uint8_t *kcs_buff;
kcs_buff = malloc(3 + bridge_msg.data_len);
if (kcs_buff == NULL) {
uint8_t *bridge_kcs_buff;
bridge_kcs_buff = malloc(3 + bridge_msg.data_len);
if (bridge_kcs_buff == NULL) {
LOG_ERR("Memory allocation failed");
continue;
}

if (bridge_msg.netfn != (netfn_no_lun | BIT(0)) ||
bridge_msg.cmd != req->cmd) {
LOG_ERR("response bridge_msg and req_msg dont match, brg_netfn 0x%x, brg_cmd 0x%x, req_netfn 0x%x, req_cmd 0x%x.",
bridge_msg.netfn, bridge_msg.cmd, netfn_no_lun,
req->cmd);
bridge_kcs_buff[0] = (netfn_no_lun | BIT(0)) << 2;
bridge_kcs_buff[1] = req->cmd;
bridge_kcs_buff[2] = CC_UNSPECIFIED_ERROR;
kcs_write(kcs_inst->index, bridge_kcs_buff, 3);
SAFE_FREE(bridge_kcs_buff);
continue;
}

// Write MCTP/PLDM response to KCS
kcs_buff[0] = (bridge_msg.netfn | BIT(0)) << 2;
kcs_buff[1] = bridge_msg.cmd;
kcs_buff[2] = bridge_msg.completion_code;
memcpy(&kcs_buff[3], &bridge_msg.data, bridge_msg.data_len);
bridge_kcs_buff[0] = (bridge_msg.netfn | BIT(0)) << 2;
bridge_kcs_buff[1] = bridge_msg.cmd;
bridge_kcs_buff[2] = bridge_msg.completion_code;
memcpy(&bridge_kcs_buff[3], &bridge_msg.data, bridge_msg.data_len);

if (!pal_immediate_respond_from_HOST(req->netfn, req->cmd)) {
kcs_write(kcs_inst->index, kcs_buff,
if (!pal_immediate_respond_from_HOST(netfn_no_lun, req->cmd)) {
kcs_write(kcs_inst->index, bridge_kcs_buff,
3 + bridge_msg.data_len);
}

SAFE_FREE(kcs_buff);
SAFE_FREE(bridge_kcs_buff);
} else {
status = ipmb_send_request(&bridge_msg,
IPMB_inf_index_map[BMC_IPMB]);
@@ -647,10 +667,10 @@ void kcs_device_init(char **config, uint8_t size)
}
#ifdef ENABLE_PLDM
#ifndef ENABLE_OEM_PLDM
k_work_queue_start(&kcs_work_q, kcs_stack_area,
K_THREAD_STACK_SIZEOF(kcs_stack_area), CONFIG_MAIN_THREAD_PRIORITY, NULL);
k_thread_name_set(&kcs_work_q.thread, "kcs_worker");
k_mutex_init(&mutex_use_count);
k_work_queue_start(&kcs_work_q, kcs_stack_area, K_THREAD_STACK_SIZEOF(kcs_stack_area),
CONFIG_MAIN_THREAD_PRIORITY, NULL);
k_thread_name_set(&kcs_work_q.thread, "kcs_worker");
k_mutex_init(&mutex_use_count);
#endif
#endif

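For reference, below is a minimal, self-contained sketch of the single-exit cleanup pattern that the refactored `schedule_kcs_work_handle()` adopts in this diff, where every failure branch jumps to one `error:` label that frees both allocations. The helpers `queue_is_full()` and `submit_work()` are illustrative stand-ins, not functions from this repository.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the project's SAFE_FREE(): free and NULL the pointer. */
#define SAFE_FREE(p)        \
	do {                \
		free(p);    \
		(p) = NULL; \
	} while (0)

static int queue_is_full(void)
{
	return 0; /* pretend the work queue has room */
}

static int submit_work(void)
{
	return 0; /* pretend scheduling succeeds */
}

/* Single exit point: the two buffers are released in exactly one place
 * instead of being freed separately on each failure path. */
static int schedule_work_sketch(void)
{
	char *work = malloc(32);
	char *buff = malloc(32);

	if (work == NULL || buff == NULL)
		goto error;

	if (queue_is_full())
		goto error;

	if (submit_work() != 0)
		goto error;

	return 0;

error:
	SAFE_FREE(work);
	SAFE_FREE(buff);
	return -1;
}

int main(void)
{
	printf("schedule_work_sketch() = %d\n", schedule_work_sketch());
	return 0;
}
```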
11 changes: 11 additions & 0 deletions common/service/pldm/pldm.c
@@ -47,6 +47,14 @@ LOG_MODULE_REGISTER(pldm);

#define PLDM_FW_UPDATE_MAX_RETRY 3

#ifndef PLDM_BRIDGE_IPMI_TIMEOUT_MS
#define PLDM_BRIDGE_IPMI_TIMEOUT_MS PLDM_MSG_TIMEOUT_MS
#endif

#ifndef PLDM_BRIDGE_IPMI_MAX_RETRY
#define PLDM_BRIDGE_IPMI_MAX_RETRY PLDM_MSG_MAX_RETRY
#endif

#define PLDM_RESP_MSG_PROC_MUTEX_TIMEOUT_MS 500
#define PLDM_TASK_NAME_MAX_SIZE 32

@@ -210,6 +218,9 @@ uint16_t mctp_pldm_read(void *mctp_p, pldm_msg *msg, uint8_t *rbuf, uint16_t rbu
if (msg->hdr.pldm_type == PLDM_TYPE_FW_UPDATE) {
msg->timeout_ms = PLDM_FW_UPDATE_TIMEOUT_MS;
max_retry = PLDM_FW_UPDATE_MAX_RETRY;
} else if (msg->hdr.cmd == PLDM_OEM_IPMI_BRIDGE) {
msg->timeout_ms = PLDM_BRIDGE_IPMI_TIMEOUT_MS;
max_retry = PLDM_BRIDGE_IPMI_MAX_RETRY;
} else {
msg->timeout_ms = PLDM_MSG_TIMEOUT_MS;
max_retry = PLDM_MSG_MAX_RETRY;
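For reference, a minimal sketch of the timeout/retry selection that `mctp_pldm_read()` performs after this change: firmware-update transfers keep their own budget, bridged IPMI requests get the new bridge-specific budget, and everything else falls back to the generic default. Only the precedence order mirrors the diff; the numeric values and the `PLDM_TYPE_FW_UPDATE`/`PLDM_OEM_IPMI_BRIDGE` codes used here are assumed placeholders.

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder values; only the relative precedence matters here. */
#define PLDM_MSG_TIMEOUT_MS 5000
#define PLDM_MSG_MAX_RETRY 3
#define PLDM_FW_UPDATE_TIMEOUT_MS 30000
#define PLDM_FW_UPDATE_MAX_RETRY 3
#define PLDM_BRIDGE_IPMI_TIMEOUT_MS 1000
#define PLDM_BRIDGE_IPMI_MAX_RETRY 3

#define PLDM_TYPE_FW_UPDATE 0x05  /* PLDM firmware-update type */
#define PLDM_OEM_IPMI_BRIDGE 0x01 /* assumed OEM bridge command code */

/* Precedence mirrors mctp_pldm_read(): FW update first, then the
 * bridged IPMI case added by this diff, then the generic default. */
static void pick_timeout(uint8_t pldm_type, uint8_t cmd,
			 uint32_t *timeout_ms, uint8_t *max_retry)
{
	if (pldm_type == PLDM_TYPE_FW_UPDATE) {
		*timeout_ms = PLDM_FW_UPDATE_TIMEOUT_MS;
		*max_retry = PLDM_FW_UPDATE_MAX_RETRY;
	} else if (cmd == PLDM_OEM_IPMI_BRIDGE) {
		*timeout_ms = PLDM_BRIDGE_IPMI_TIMEOUT_MS;
		*max_retry = PLDM_BRIDGE_IPMI_MAX_RETRY;
	} else {
		*timeout_ms = PLDM_MSG_TIMEOUT_MS;
		*max_retry = PLDM_MSG_MAX_RETRY;
	}
}

int main(void)
{
	uint32_t t;
	uint8_t r;

	pick_timeout(0x3F, PLDM_OEM_IPMI_BRIDGE, &t, &r);
	printf("bridged IPMI: timeout=%u ms, retries=%u\n",
	       (unsigned)t, (unsigned)r);
	return 0;
}
```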
2 changes: 2 additions & 0 deletions meta-facebook/yv35-cl/src/platform/plat_def.h
@@ -29,5 +29,7 @@

#define WORKER_STACK_SIZE 4096
#define PLAT_MCTP_MSG_MAX_SIZE 64
#define PLDM_MSG_TIMEOUT_MS 5000
#define PLDM_BRIDGE_IPMI_TIMEOUT_MS 1000

#endif
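For reference, a minimal sketch of how a platform header such as `plat_def.h` can override the common defaults, assuming its defines are visible before the `#ifndef` guards in `pldm.c`. The values match this diff; the single-file layout is purely illustrative.

```c
#include <stdio.h>

/* Stand-in for the platform header (plat_def.h): per-platform values,
 * defined first so they take precedence. */
#define PLDM_MSG_TIMEOUT_MS 5000
#define PLDM_BRIDGE_IPMI_TIMEOUT_MS 1000

/* Stand-in for the common-code default from pldm.c: only applied when
 * the platform did not already provide a value. */
#ifndef PLDM_BRIDGE_IPMI_TIMEOUT_MS
#define PLDM_BRIDGE_IPMI_TIMEOUT_MS PLDM_MSG_TIMEOUT_MS
#endif

int main(void)
{
	/* With the yv35-cl values above, bridged IPMI requests time out
	 * after 1000 ms instead of the generic 5000 ms. */
	printf("bridge timeout = %d ms\n", PLDM_BRIDGE_IPMI_TIMEOUT_MS);
	return 0;
}
```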