1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <asm/unaligned.h>
11 #include <crypto/sha2.h>
16 #define LSA_SIZE SZ_128K
17 #define FW_SIZE SZ_64M
19 #define DEV_SIZE SZ_2G
20 #define EFFECT(x) (1U << x)
22 #define MOCK_INJECT_DEV_MAX 8
23 #define MOCK_INJECT_TEST_MAX 128
25 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
27 enum cxl_command_effects {
28 CONF_CHANGE_COLD_RESET = 0,
29 CONF_CHANGE_IMMEDIATE,
30 DATA_CHANGE_IMMEDIATE,
31 POLICY_CHANGE_IMMEDIATE,
33 SECURITY_CHANGE_IMMEDIATE,
35 SECONDARY_MBOX_SUPPORTED,
38 #define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
40 static struct cxl_cel_entry mock_cel[] = {
42 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
43 .effect = CXL_CMD_EFFECT_NONE,
46 .opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
47 .effect = CXL_CMD_EFFECT_NONE,
50 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
51 .effect = CXL_CMD_EFFECT_NONE,
54 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
55 .effect = CXL_CMD_EFFECT_NONE,
58 .opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
59 .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
60 EFFECT(DATA_CHANGE_IMMEDIATE)),
63 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
64 .effect = CXL_CMD_EFFECT_NONE,
67 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
68 .effect = CXL_CMD_EFFECT_NONE,
71 .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
72 .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
75 .opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
76 .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
79 .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
80 .effect = CXL_CMD_EFFECT_NONE,
83 .opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
84 .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
85 EFFECT(BACKGROUND_OP)),
88 .opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
89 .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
90 EFFECT(CONF_CHANGE_IMMEDIATE)),
94 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
95 struct cxl_mbox_health_info {
101 __le32 dirty_shutdowns;
102 __le32 volatile_errors;
107 struct cxl_mbox_get_supported_logs gsl;
108 struct cxl_gsl_entry entry;
109 } mock_gsl_payload = {
111 .entries = cpu_to_le16(1),
114 .uuid = DEFINE_CXL_CEL_UUID,
115 .size = cpu_to_le32(sizeof(mock_cel)),
119 #define PASS_TRY_LIMIT 3
121 #define CXL_TEST_EVENT_CNT_MAX 15
123 /* Set a number of events to return at a time for simulation. */
124 #define CXL_TEST_EVENT_CNT 3
126 struct mock_event_log {
132 struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
135 struct mock_event_store {
136 struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
140 struct cxl_mockmem_data {
147 u8 user_pass[NVDIMM_PASSPHRASE_LEN];
148 u8 master_pass[NVDIMM_PASSPHRASE_LEN];
151 struct mock_event_store mes;
152 struct cxl_memdev_state *mds;
157 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
159 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
161 if (log_type >= CXL_EVENT_TYPE_MAX)
163 return &mdata->mes.mock_logs[log_type];
166 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
168 return log->events[log->cur_idx];
171 static void event_reset_log(struct mock_event_log *log)
175 log->nr_overflow = log->overflow_reset;
178 /* Handle can never be 0 use 1 based indexing for handle */
179 static u16 event_get_clear_handle(struct mock_event_log *log)
181 return log->clear_idx + 1;
184 /* Handle can never be 0 use 1 based indexing for handle */
185 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
187 u16 cur_handle = log->cur_idx + 1;
189 return cpu_to_le16(cur_handle);
192 static bool event_log_empty(struct mock_event_log *log)
194 return log->cur_idx == log->nr_events;
/*
 * Append @event to the mock log of type @log_type in @mes.
 * When the log is already at CXL_TEST_EVENT_CNT_MAX capacity the event is
 * dropped and the overflow counters are advanced instead (the elided lines
 * presumably bump nr_overflow — listing is gappy, confirm against full file).
 */
197 static void mes_add_event(struct mock_event_store *mes,
198 enum cxl_event_log_type log_type,
199 struct cxl_event_record_raw *event)
201 struct mock_event_log *log;
203 if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
206 log = &mes->mock_logs[log_type];
/* Full log: record the overflow baseline used by event_reset_log() */
208 if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
210 log->overflow_reset = log->nr_overflow;
214 log->events[log->nr_events] = event;
/*
 * Mock CXL Get Event Records (opcode 0x0100) handler.
 * Validates payload sizes, selects the requested log, and copies up to
 * CXL_TEST_EVENT_CNT records into the output payload, rewriting each
 * record's handle to the 1-based current index.  Sets MORE_RECORDS when
 * events remain and reports synthetic overflow timestamps (5s/1s ago)
 * when the log has overflowed.
 */
218 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
220 struct cxl_get_event_payload *pl;
221 struct mock_event_log *log;
/* Input must be exactly the one-byte log selector */
226 if (cmd->size_in != sizeof(log_type))
229 if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
232 log_type = *((u8 *)cmd->payload_in);
233 if (log_type >= CXL_EVENT_TYPE_MAX)
236 memset(cmd->payload_out, 0, cmd->size_out);
238 log = event_find_log(dev, log_type);
239 if (!log || event_log_empty(log))
242 pl = cmd->payload_out;
/* Copy at most CXL_TEST_EVENT_CNT records per call to exercise batching */
244 for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
245 memcpy(&pl->records[i], event_get_current(log),
246 sizeof(pl->records[i]));
247 pl->records[i].hdr.handle = event_get_cur_event_handle(log);
251 pl->record_count = cpu_to_le16(i);
252 if (!event_log_empty(log))
253 pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
/* Fabricate overflow telemetry relative to the current wall clock */
255 if (log->nr_overflow) {
258 pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
259 pl->overflow_err_count = cpu_to_le16(nr_overflow);
260 ns = ktime_get_real_ns();
261 ns -= 5000000000; /* 5s ago */
262 pl->first_overflow_timestamp = cpu_to_le64(ns);
263 ns = ktime_get_real_ns();
264 ns -= 1000000000; /* 1s ago */
265 pl->last_overflow_timestamp = cpu_to_le64(ns);
/*
 * Mock CXL Clear Event Records (opcode 0x0101) handler.
 * Rejects clearing more records than have been returned, and requires the
 * supplied handles to be in sequential (1-based) order, then advances the
 * log's clear index and resets the overflow count.
 */
271 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
273 struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
274 struct mock_event_log *log;
275 u8 log_type = pl->event_log;
279 if (log_type >= CXL_EVENT_TYPE_MAX)
282 log = event_find_log(dev, log_type);
284 return 0; /* No mock data in this log */
287 * Note: the spec does not strictly forbid this sequence (a host could
288 * 'guess' handles and clear them in order), but it is poor host
289 * behavior, so the mock rejects it to test the driver.
291 if (log->clear_idx + pl->nr_recs > log->cur_idx) {
293 "Attempting to clear more events than returned!\n");
297 /* Check handle order prior to clearing events */
298 for (nr = 0, handle = event_get_clear_handle(log);
301 if (handle != le16_to_cpu(pl->handles[nr])) {
302 dev_err(dev, "Clearing events out of order\n");
/* A successful clear acknowledges any recorded overflow */
307 if (log->nr_overflow)
308 log->nr_overflow = 0;
311 log->clear_idx += pl->nr_recs;
/*
 * Sysfs-driven test hook: rewind every mock event log to its start and
 * re-notify the driver so the full set of canned events is re-delivered.
 */
315 static void cxl_mock_event_trigger(struct device *dev)
317 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
318 struct mock_event_store *mes = &mdata->mes;
321 for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
322 struct mock_event_log *log;
324 log = event_find_log(dev, i);
326 event_reset_log(log);
/* Kick the core to re-read all pending logs */
329 cxl_mem_get_event_records(mdata->mds, mes->ev_status);
332 struct cxl_event_record_raw maint_needed = {
334 .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
335 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
336 .length = sizeof(struct cxl_event_record_raw),
337 .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
338 /* .handle = Set dynamically */
339 .related_handle = cpu_to_le16(0xa5b6),
341 .data = { 0xDE, 0xAD, 0xBE, 0xEF },
344 struct cxl_event_record_raw hardware_replace = {
346 .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
347 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
348 .length = sizeof(struct cxl_event_record_raw),
349 .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
350 /* .handle = Set dynamically */
351 .related_handle = cpu_to_le16(0xb6a5),
353 .data = { 0xDE, 0xAD, 0xBE, 0xEF },
356 struct cxl_event_gen_media gen_media = {
358 .id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
359 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
360 .length = sizeof(struct cxl_event_gen_media),
361 .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
362 /* .handle = Set dynamically */
363 .related_handle = cpu_to_le16(0),
365 .phys_addr = cpu_to_le64(0x2000),
366 .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
367 .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
368 .transaction_type = CXL_GMER_TRANS_HOST_WRITE,
369 /* .validity_flags = <set below> */
374 struct cxl_event_dram dram = {
376 .id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
377 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
378 .length = sizeof(struct cxl_event_dram),
379 .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
380 /* .handle = Set dynamically */
381 .related_handle = cpu_to_le16(0),
383 .phys_addr = cpu_to_le64(0x8000),
384 .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
385 .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
386 .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
387 /* .validity_flags = <set below> */
391 .column = {0xDE, 0xAD},
394 struct cxl_event_mem_module mem_module = {
396 .id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
397 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
398 .length = sizeof(struct cxl_event_mem_module),
399 /* .handle = Set dynamically */
400 .related_handle = cpu_to_le16(0),
402 .event_type = CXL_MMER_TEMP_CHANGE,
404 .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
405 .media_status = CXL_DHI_MS_ALL_DATA_LOST,
406 .add_status = (CXL_DHI_AS_CRITICAL << 2) |
407 (CXL_DHI_AS_WARNING << 4) |
408 (CXL_DHI_AS_WARNING << 5),
409 .device_temp = { 0xDE, 0xAD},
410 .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
411 .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
412 .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
/*
 * Mock CXL Set Timestamp handler: validate the exact input size and the
 * absence of output payload, then latch the host-supplied timestamp into
 * the per-device mock data.
 */
416 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
417 struct cxl_mbox_cmd *cmd)
419 struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
420 struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
422 if (cmd->size_in != sizeof(*ts))
425 if (cmd->size_out != 0)
428 mdata->timestamp = le64_to_cpu(ts->timestamp);
/*
 * Populate the three mock event logs (INFO, FAIL, FATAL) with the canned
 * records defined above.  The FAIL log is deliberately over-filled past
 * CXL_TEST_EVENT_CNT_MAX to exercise the overflow path, and ev_status is
 * accumulated so the driver is told which logs have pending events.
 */
432 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
/* Fill in the validity flags that could not be static initializers */
434 put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
435 &gen_media.validity_flags);
437 put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
438 CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
439 &dram.validity_flags);
441 mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
442 mes_add_event(mes, CXL_EVENT_TYPE_INFO,
443 (struct cxl_event_record_raw *)&gen_media);
444 mes_add_event(mes, CXL_EVENT_TYPE_INFO,
445 (struct cxl_event_record_raw *)&mem_module);
446 mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
448 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
449 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
450 mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
451 (struct cxl_event_record_raw *)&dram);
452 mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
453 (struct cxl_event_record_raw *)&gen_media);
454 mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
455 (struct cxl_event_record_raw *)&mem_module);
456 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
457 mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
458 (struct cxl_event_record_raw *)&dram);
459 /* Overflow this log */
460 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
461 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
462 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
463 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
464 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
465 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
466 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
467 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
468 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
469 mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
470 mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
472 mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
473 mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
474 (struct cxl_event_record_raw *)&dram);
475 mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
/*
 * Mock Get Supported Logs: copy the static one-entry (CEL) payload out
 * after verifying the output buffer is large enough.
 */
478 static int mock_gsl(struct cxl_mbox_cmd *cmd)
480 if (cmd->size_out < sizeof(mock_gsl_payload))
483 memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
484 cmd->size_out = sizeof(mock_gsl_payload);
/*
 * Mock Get Log handler: only the Command Effects Log UUID is supported.
 * Bounds-checks the requested offset/length against the mock CEL and the
 * mailbox payload limits before copying the slice out.
 */
489 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
491 struct cxl_mbox_get_log *gl = cmd->payload_in;
492 u32 offset = le32_to_cpu(gl->offset);
493 u32 length = le32_to_cpu(gl->length);
494 uuid_t uuid = DEFINE_CXL_CEL_UUID;
495 void *data = &mock_cel;
497 if (cmd->size_in < sizeof(*gl))
499 if (length > mds->payload_size)
501 if (offset + length > sizeof(mock_cel))
503 if (!uuid_equal(&gl->uuid, &uuid))
505 if (length > cmd->size_out)
508 memcpy(cmd->payload_out, data + offset, length);
513 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
515 struct cxl_mbox_identify id = {
516 .fw_revision = { "mock fw v1 " },
518 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
520 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
523 if (cmd->size_out < sizeof(id))
526 memcpy(cmd->payload_out, &id, sizeof(id));
/*
 * Mock Identify for the standard (non-RCD) memdev flavor: advertises the
 * LSA size, volatile/persistent capacities, and poison-injection limits,
 * then copies the identify payload to the caller.
 */
531 static int mock_id(struct cxl_mbox_cmd *cmd)
533 struct cxl_mbox_identify id = {
534 .fw_revision = { "mock fw v1 " },
535 .lsa_size = cpu_to_le32(LSA_SIZE),
537 cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
539 cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
540 .inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
/* poison_list_max_mer is a 24-bit field, hence the unaligned helper */
543 put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
545 if (cmd->size_out < sizeof(id))
548 memcpy(cmd->payload_out, &id, sizeof(id));
/*
 * Mock Get Partition Info: report the device capacity split evenly
 * between active volatile and active persistent partitions.
 */
553 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
555 struct cxl_mbox_get_partition_info pi = {
556 .active_volatile_cap =
557 cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
558 .active_persistent_cap =
559 cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
562 if (cmd->size_out < sizeof(pi))
565 memcpy(cmd->payload_out, &pi, sizeof(pi));
570 static int mock_sanitize(struct cxl_mockmem_data *mdata,
571 struct cxl_mbox_cmd *cmd)
573 if (cmd->size_in != 0)
576 if (cmd->size_out != 0)
579 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
580 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
583 if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
584 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
588 return 0; /* assume less than 2 secs, no bg */
591 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
592 struct cxl_mbox_cmd *cmd)
594 if (cmd->size_in != 0)
597 if (cmd->size_out != 0)
600 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
601 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
605 if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
606 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
613 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
614 struct cxl_mbox_cmd *cmd)
619 if (cmd->size_out != sizeof(u32))
622 memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
627 static void master_plimit_check(struct cxl_mockmem_data *mdata)
629 if (mdata->master_limit == PASS_TRY_LIMIT)
631 mdata->master_limit++;
632 if (mdata->master_limit == PASS_TRY_LIMIT)
633 mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
636 static void user_plimit_check(struct cxl_mockmem_data *mdata)
638 if (mdata->user_limit == PASS_TRY_LIMIT)
641 if (mdata->user_limit == PASS_TRY_LIMIT)
642 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
645 static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
646 struct cxl_mbox_cmd *cmd)
648 struct cxl_set_pass *set_pass;
650 if (cmd->size_in != sizeof(*set_pass))
653 if (cmd->size_out != 0)
656 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
657 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
661 set_pass = cmd->payload_in;
662 switch (set_pass->type) {
663 case CXL_PMEM_SEC_PASS_MASTER:
664 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
665 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
669 * CXL spec rev3.0 8.2.9.8.6.2, The master pasphrase shall only be set in
670 * the security disabled state when the user passphrase is not set.
672 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
673 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
676 if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
677 master_plimit_check(mdata);
678 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
681 memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
682 mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
685 case CXL_PMEM_SEC_PASS_USER:
686 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
687 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
690 if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
691 user_plimit_check(mdata);
692 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
695 memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
696 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
700 cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
705 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
706 struct cxl_mbox_cmd *cmd)
708 struct cxl_disable_pass *dis_pass;
710 if (cmd->size_in != sizeof(*dis_pass))
713 if (cmd->size_out != 0)
716 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
717 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
721 dis_pass = cmd->payload_in;
722 switch (dis_pass->type) {
723 case CXL_PMEM_SEC_PASS_MASTER:
724 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
725 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
729 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
730 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
734 if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
735 master_plimit_check(mdata);
736 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
740 mdata->master_limit = 0;
741 memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
742 mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
745 case CXL_PMEM_SEC_PASS_USER:
746 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
747 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
751 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
752 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
756 if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
757 user_plimit_check(mdata);
758 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
762 mdata->user_limit = 0;
763 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
764 mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
765 CXL_PMEM_SEC_STATE_LOCKED);
769 cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
776 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
777 struct cxl_mbox_cmd *cmd)
779 if (cmd->size_in != 0)
782 if (cmd->size_out != 0)
785 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
788 mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
792 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
793 struct cxl_mbox_cmd *cmd)
795 if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
798 if (cmd->size_out != 0)
801 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
802 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
806 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
807 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
811 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
812 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
816 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
817 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
821 if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
822 if (++mdata->user_limit == PASS_TRY_LIMIT)
823 mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
824 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
828 mdata->user_limit = 0;
829 mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
833 static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
834 struct cxl_mbox_cmd *cmd)
836 struct cxl_pass_erase *erase;
838 if (cmd->size_in != sizeof(*erase))
841 if (cmd->size_out != 0)
844 erase = cmd->payload_in;
845 if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
846 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
850 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
851 erase->type == CXL_PMEM_SEC_PASS_USER) {
852 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
856 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
857 erase->type == CXL_PMEM_SEC_PASS_MASTER) {
858 cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
862 switch (erase->type) {
863 case CXL_PMEM_SEC_PASS_MASTER:
865 * The spec does not clearly define the behavior of the scenario
866 * where a master passphrase is passed in while the master
867 * passphrase is not set and user passphrase is not set. The
868 * code will take the assumption that it will behave the same
869 * as a CXL secure erase command without passphrase (0x4401).
871 if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
872 if (memcmp(mdata->master_pass, erase->pass,
873 NVDIMM_PASSPHRASE_LEN)) {
874 master_plimit_check(mdata);
875 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
878 mdata->master_limit = 0;
879 mdata->user_limit = 0;
880 mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
881 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
882 mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
885 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
886 * When master passphrase is disabled, the device shall
887 * return Invalid Input for the Passphrase Secure Erase
888 * command with master passphrase.
892 /* Scramble encryption keys so that data is effectively erased */
894 case CXL_PMEM_SEC_PASS_USER:
896 * The spec does not clearly define the behavior of the scenario
897 * where a user passphrase is passed in while the user
898 * passphrase is not set. The code will take the assumption that
899 * it will behave the same as a CXL secure erase command without
900 * passphrase (0x4401).
902 if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
903 if (memcmp(mdata->user_pass, erase->pass,
904 NVDIMM_PASSPHRASE_LEN)) {
905 user_plimit_check(mdata);
906 cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
909 mdata->user_limit = 0;
910 mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
911 memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
915 * CXL rev3 Table 8-118
916 * If user passphrase is not set or supported by device, current
917 * passphrase value is ignored. Will make the assumption that
918 * the operation will proceed as secure erase w/o passphrase
919 * since spec is not explicit.
922 /* Scramble encryption keys so that data is effectively erased */
/*
 * Mock Get LSA: copy @length bytes of the vmalloc'd label storage area,
 * starting at @offset, into the output payload.
 * NOTE(review): offset/length are u32 here; offset + length cannot exceed
 * LSA_SIZE per the check below, which also bounds any u32 wraparound
 * concern for these sizes — confirm types in the full file.
 */
931 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
932 struct cxl_mbox_cmd *cmd)
934 struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
935 void *lsa = mdata->lsa;
938 if (sizeof(*get_lsa) > cmd->size_in)
940 offset = le32_to_cpu(get_lsa->offset);
941 length = le32_to_cpu(get_lsa->length);
942 if (offset + length > LSA_SIZE)
944 if (length > cmd->size_out)
947 memcpy(cmd->payload_out, lsa + offset, length);
/*
 * Mock Set LSA: write the trailing variable-length data of the input
 * payload into the label storage area at the requested offset.  The write
 * length is derived from the payload size rather than a length field.
 */
951 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
952 struct cxl_mbox_cmd *cmd)
954 struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
955 void *lsa = mdata->lsa;
958 if (sizeof(*set_lsa) > cmd->size_in)
960 offset = le32_to_cpu(set_lsa->offset);
961 length = cmd->size_in - sizeof(*set_lsa);
962 if (offset + length > LSA_SIZE)
965 memcpy(lsa + offset, &set_lsa->data[0], length);
969 static int mock_health_info(struct cxl_mbox_cmd *cmd)
971 struct cxl_mbox_health_info health_info = {
972 /* set flags for maint needed, perf degraded, hw replacement */
973 .health_status = 0x7,
974 /* set media status to "All Data Lost" */
977 * set ext_status flags for:
978 * ext_life_used: normal,
979 * ext_temperature: critical,
980 * ext_corrected_volatile: warning,
981 * ext_corrected_persistent: normal,
985 .temperature = cpu_to_le16(25),
986 .dirty_shutdowns = cpu_to_le32(10),
987 .volatile_errors = cpu_to_le32(20),
988 .pmem_errors = cpu_to_le32(30),
991 if (cmd->size_out < sizeof(health_info))
994 memcpy(cmd->payload_out, &health_info, sizeof(health_info));
998 static struct mock_poison {
999 struct cxl_dev_state *cxlds;
1001 } mock_poison_list[MOCK_INJECT_TEST_MAX];
/*
 * Build a Get Poison List output payload containing every injected poison
 * record for @cxlds that falls within [offset, offset + length).  Each
 * record is tagged with CXL_POISON_SOURCE_INJECTED and reported with a
 * 1-unit length.  Returns a kzalloc'd payload sized for at most
 * poison_inject_dev_max records; caller owns (and presumably frees) it.
 */
1003 static struct cxl_mbox_poison_out *
1004 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1006 struct cxl_mbox_poison_out *po;
1010 po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1014 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1015 if (mock_poison_list[i].cxlds != cxlds)
1017 if (mock_poison_list[i].dpa < offset ||
1018 mock_poison_list[i].dpa > offset + length - 1)
/* Encode the injected-source flag in the low bits of the DPA */
1021 dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1022 po->record[nr_records].address = cpu_to_le64(dpa);
1023 po->record[nr_records].length = cpu_to_le32(1);
/* Stop once the payload's record capacity is reached */
1025 if (nr_records == poison_inject_dev_max)
1029 /* Always return count, even when zero */
1030 po->count = cpu_to_le16(nr_records);
/*
 * Mock Get Poison List handler: delegate to cxl_get_injected_po() for the
 * requested DPA window and copy the resulting records (sized by the
 * returned count) into the output payload.
 */
1035 static int mock_get_poison(struct cxl_dev_state *cxlds,
1036 struct cxl_mbox_cmd *cmd)
1038 struct cxl_mbox_poison_in *pi = cmd->payload_in;
1039 struct cxl_mbox_poison_out *po;
1040 u64 offset = le64_to_cpu(pi->offset);
1041 u64 length = le64_to_cpu(pi->length);
1044 po = cxl_get_injected_po(cxlds, offset, length);
1047 nr_records = le16_to_cpu(po->count);
1048 memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1049 cmd->size_out = struct_size(po, record, nr_records);
1055 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1059 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1060 if (mock_poison_list[i].cxlds == cxlds)
1063 return (count >= poison_inject_dev_max);
/*
 * Record an injected poison at @dpa for @cxlds in the first free slot of
 * the global mock_poison_list.  Fails (returns false, per the dev_dbg
 * paths) when either the per-device limit or the global test limit is
 * already reached.
 */
1066 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1068 if (mock_poison_dev_max_injected(cxlds)) {
1070 "Device poison injection limit has been reached: %d\n",
1071 MOCK_INJECT_DEV_MAX);
/* A NULL cxlds marks a free slot in the global list */
1075 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1076 if (!mock_poison_list[i].cxlds) {
1077 mock_poison_list[i].cxlds = cxlds;
1078 mock_poison_list[i].dpa = dpa;
1083 "Mock test poison injection limit has been reached: %d\n",
1084 MOCK_INJECT_TEST_MAX);
1089 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1091 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1092 if (mock_poison_list[i].cxlds == cxlds &&
1093 mock_poison_list[i].dpa == dpa)
/*
 * Mock Inject Poison handler.  Injecting an already-poisoned DPA is not
 * an error — it is logged and treated as success, matching real device
 * behavior.
 */
1099 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1100 struct cxl_mbox_cmd *cmd)
1102 struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1103 u64 dpa = le64_to_cpu(pi->address);
1105 if (mock_poison_found(cxlds, dpa)) {
1106 /* Not an error to inject poison if already poisoned */
1107 dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1110 if (!mock_poison_add(cxlds, dpa))
1116 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1118 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1119 if (mock_poison_list[i].cxlds == cxlds &&
1120 mock_poison_list[i].dpa == dpa) {
1121 mock_poison_list[i].cxlds = NULL;
/*
 * Mock Clear Poison handler: remove @dpa from the mock poison list.
 * Clearing a DPA that was never poisoned is only logged, not failed.
 */
1128 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1129 struct cxl_mbox_cmd *cmd)
1131 struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1132 u64 dpa = le64_to_cpu(pi->address);
1135 * A real CXL device will write pi->write_data to the address
1136 * being cleared. In this mock, just delete this address from
1137 * the mock poison list.
1139 if (!mock_poison_del(cxlds, dpa))
1140 dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1145 static bool mock_poison_list_empty(void)
1147 for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1148 if (mock_poison_list[i].cxlds)
1154 static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1156 return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1159 static ssize_t poison_inject_max_store(struct device_driver *drv,
1160 const char *buf, size_t len)
1164 if (kstrtoint(buf, 0, &val) < 0)
1167 if (!mock_poison_list_empty())
1170 if (val <= MOCK_INJECT_TEST_MAX)
1171 poison_inject_dev_max = val;
1178 static DRIVER_ATTR_RW(poison_inject_max);
1180 static struct attribute *cxl_mock_mem_core_attrs[] = {
1181 &driver_attr_poison_inject_max.attr,
1184 ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1186 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1187 struct cxl_mbox_cmd *cmd)
1189 struct cxl_mbox_get_fw_info fw_info = {
1190 .num_slots = FW_SLOTS,
1191 .slot_info = (mdata->fw_slot & 0x7) |
1192 ((mdata->fw_staged & 0x7) << 3),
1193 .activation_cap = 0,
1196 strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1197 strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1198 strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1199 strcpy(fw_info.slot_4_revision, "");
1201 if (cmd->size_out < sizeof(fw_info))
1204 memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
/*
 * Mock Transfer FW handler: stage firmware data into the mock fw buffer.
 * The offset field is in CXL_FW_TRANSFER_ALIGNMENT units; FULL/END
 * actions additionally validate the slot and (for END) record the final
 * firmware size.  ABORT takes the non-copying path.
 */
1208 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1209 struct cxl_mbox_cmd *cmd)
1211 struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1212 void *fw = mdata->fw;
1213 size_t offset, length;
1215 offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1216 length = cmd->size_in - sizeof(*transfer);
1217 if (offset + length > FW_SIZE)
1220 switch (transfer->action) {
1221 case CXL_FW_TRANSFER_ACTION_FULL:
1225 case CXL_FW_TRANSFER_ACTION_END:
/* Slots are 1-based; 0 and > FW_SLOTS are invalid */
1226 if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1228 mdata->fw_size = offset + length;
1230 case CXL_FW_TRANSFER_ACTION_INITIATE:
1231 case CXL_FW_TRANSFER_ACTION_CONTINUE:
1233 case CXL_FW_TRANSFER_ACTION_ABORT:
1239 memcpy(fw + offset, transfer->data, length);
/*
 * Mock Activate FW handler: ONLINE activation makes @slot current and
 * clears any staged slot; OFFLINE activation only stages @slot for a
 * later (cold-reset) activation.
 */
1243 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1244 struct cxl_mbox_cmd *cmd)
1246 struct cxl_mbox_activate_fw *activate = cmd->payload_in;
/* Slots are 1-based; reject 0 and out-of-range slots */
1248 if (activate->slot == 0 || activate->slot > FW_SLOTS)
1251 switch (activate->action) {
1252 case CXL_FW_ACTIVATE_ONLINE:
1253 mdata->fw_slot = activate->slot;
1254 mdata->fw_staged = 0;
1256 case CXL_FW_ACTIVATE_OFFLINE:
1257 mdata->fw_staged = activate->slot;
/*
 * Central mock mailbox dispatcher, installed as mds->mbox_send.  Routes
 * each CXL mailbox opcode to its mock handler above and logs the
 * opcode/sizes/result at debug level.  The IDENTIFY case branches to the
 * RCD variant (see mock_rcd_id / is_rcd elsewhere in this file).
 */
1264 static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
1265 struct cxl_mbox_cmd *cmd)
1267 struct cxl_dev_state *cxlds = &mds->cxlds;
1268 struct device *dev = cxlds->dev;
1269 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1272 switch (cmd->opcode) {
1273 case CXL_MBOX_OP_SET_TIMESTAMP:
1274 rc = mock_set_timestamp(cxlds, cmd);
1276 case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
1279 case CXL_MBOX_OP_GET_LOG:
1280 rc = mock_get_log(mds, cmd);
1282 case CXL_MBOX_OP_IDENTIFY:
1284 rc = mock_rcd_id(cmd);
1288 case CXL_MBOX_OP_GET_LSA:
1289 rc = mock_get_lsa(mdata, cmd);
1291 case CXL_MBOX_OP_GET_PARTITION_INFO:
1292 rc = mock_partition_info(cmd);
1294 case CXL_MBOX_OP_GET_EVENT_RECORD:
1295 rc = mock_get_event(dev, cmd);
1297 case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
1298 rc = mock_clear_event(dev, cmd);
1300 case CXL_MBOX_OP_SET_LSA:
1301 rc = mock_set_lsa(mdata, cmd);
1303 case CXL_MBOX_OP_GET_HEALTH_INFO:
1304 rc = mock_health_info(cmd);
1306 case CXL_MBOX_OP_SANITIZE:
1307 rc = mock_sanitize(mdata, cmd);
1309 case CXL_MBOX_OP_SECURE_ERASE:
1310 rc = mock_secure_erase(mdata, cmd);
1312 case CXL_MBOX_OP_GET_SECURITY_STATE:
1313 rc = mock_get_security_state(mdata, cmd);
1315 case CXL_MBOX_OP_SET_PASSPHRASE:
1316 rc = mock_set_passphrase(mdata, cmd);
1318 case CXL_MBOX_OP_DISABLE_PASSPHRASE:
1319 rc = mock_disable_passphrase(mdata, cmd);
1321 case CXL_MBOX_OP_FREEZE_SECURITY:
1322 rc = mock_freeze_security(mdata, cmd);
1324 case CXL_MBOX_OP_UNLOCK:
1325 rc = mock_unlock_security(mdata, cmd);
1327 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
1328 rc = mock_passphrase_secure_erase(mdata, cmd);
1330 case CXL_MBOX_OP_GET_POISON:
1331 rc = mock_get_poison(cxlds, cmd);
1333 case CXL_MBOX_OP_INJECT_POISON:
1334 rc = mock_inject_poison(cxlds, cmd);
1336 case CXL_MBOX_OP_CLEAR_POISON:
1337 rc = mock_clear_poison(cxlds, cmd);
1339 case CXL_MBOX_OP_GET_FW_INFO:
1340 rc = mock_fw_info(mdata, cmd);
1342 case CXL_MBOX_OP_TRANSFER_FW:
1343 rc = mock_transfer_fw(mdata, cmd);
1345 case CXL_MBOX_OP_ACTIVATE_FW:
1346 rc = mock_activate_fw(mdata, cmd);
1352 dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
1353 cmd->size_in, cmd->size_out, rc);
1358 static void label_area_release(void *lsa)
1363 static void fw_buf_release(void *buf)
1368 static bool is_rcd(struct platform_device *pdev)
1370 const struct platform_device_id *id = platform_get_device_id(pdev);
1372 return !!id->driver_data;
1375 static ssize_t event_trigger_store(struct device *dev,
1376 struct device_attribute *attr,
1377 const char *buf, size_t count)
1379 cxl_mock_event_trigger(dev);
1382 static DEVICE_ATTR_WO(event_trigger);
/*
 * Platform-driver probe: allocate the mock device data, the LSA and FW
 * backing buffers (with devm-managed release), create and configure the
 * cxl_memdev_state (mailbox hook, payload size, event buffer), run the
 * standard CXL init sequence (enumerate commands, poison state,
 * timestamp, identify, range info), seed the mock event logs, register
 * the memdev and its FW-upload interface, and kick initial event
 * retrieval.
 */
1384 static int cxl_mock_mem_probe(struct platform_device *pdev)
1386 struct device *dev = &pdev->dev;
1387 struct cxl_memdev *cxlmd;
1388 struct cxl_memdev_state *mds;
1389 struct cxl_dev_state *cxlds;
1390 struct cxl_mockmem_data *mdata;
1393 mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1396 dev_set_drvdata(dev, mdata);
1398 mdata->lsa = vmalloc(LSA_SIZE);
1401 mdata->fw = vmalloc(FW_SIZE);
/* Tie buffer lifetimes to the device so unbind frees them */
1406 rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1410 rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
1414 mds = cxl_memdev_state_create(dev);
1416 return PTR_ERR(mds);
1419 mds->mbox_send = cxl_mock_mbox_send;
1420 mds->payload_size = SZ_4K;
1421 mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
1423 cxlds = &mds->cxlds;
1424 cxlds->serial = pdev->id;
1427 cxlds->component_reg_phys = CXL_RESOURCE_NONE;
1430 rc = cxl_enumerate_cmds(mds);
1434 rc = cxl_poison_state_init(mds);
1438 rc = cxl_set_timestamp(mds);
1442 cxlds->media_ready = true;
1443 rc = cxl_dev_state_identify(mds);
1447 rc = cxl_mem_create_range_info(mds);
1451 cxl_mock_add_event_logs(&mdata->mes);
1453 cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
1455 return PTR_ERR(cxlmd);
1457 rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
1461 cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
1466 static ssize_t security_lock_show(struct device *dev,
1467 struct device_attribute *attr, char *buf)
1469 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1471 return sysfs_emit(buf, "%u\n",
1472 !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1475 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1476 const char *buf, size_t count)
1478 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1479 u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1480 CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1483 if (kstrtoint(buf, 0, &val) < 0)
1487 if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1489 mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1490 mdata->security_state &= ~mask;
1497 static DEVICE_ATTR_RW(security_lock);
/*
 * Sysfs attribute: emit the SHA-256 of the staged firmware image
 * (mdata->fw, first fw_size bytes) as a lowercase hex string so tests can
 * verify transfer integrity.  The hex buffer is kzalloc'd and presumably
 * freed on an elided line below — listing is gappy, confirm in full file.
 */
1499 static ssize_t fw_buf_checksum_show(struct device *dev,
1500 struct device_attribute *attr, char *buf)
1502 struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1503 u8 hash[SHA256_DIGEST_SIZE];
1504 unsigned char *hstr, *hptr;
1505 struct sha256_state sctx;
1506 ssize_t written = 0;
1510 sha256_update(&sctx, mdata->fw, mdata->fw_size);
1511 sha256_final(&sctx, hash);
/* Two hex chars per digest byte plus NUL terminator */
1513 hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1518 for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1519 hptr += sprintf(hptr, "%02x", hash[i]);
1521 written = sysfs_emit(buf, "%s\n", hstr);
1529 static struct attribute *cxl_mock_mem_attrs[] = {
1530 &dev_attr_security_lock.attr,
1531 &dev_attr_event_trigger.attr,
1532 &dev_attr_fw_buf_checksum.attr,
1535 ATTRIBUTE_GROUPS(cxl_mock_mem);
1537 static const struct platform_device_id cxl_mock_mem_ids[] = {
1538 { .name = "cxl_mem", 0 },
1539 { .name = "cxl_rcd", 1 },
1542 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1544 static struct platform_driver cxl_mock_mem_driver = {
1545 .probe = cxl_mock_mem_probe,
1546 .id_table = cxl_mock_mem_ids,
1548 .name = KBUILD_MODNAME,
1549 .dev_groups = cxl_mock_mem_groups,
1550 .groups = cxl_mock_mem_core_groups,
1554 module_platform_driver(cxl_mock_mem_driver);
1555 MODULE_LICENSE("GPL v2");
1556 MODULE_IMPORT_NS(CXL);