Merge branch 'for-6.5/cxl-type-2' into for-6.5/cxl
author Dan Williams <dan.j.williams@intel.com>
Mon, 26 Jun 2023 00:16:51 +0000 (17:16 -0700)
committer Dan Williams <dan.j.williams@intel.com>
Mon, 26 Jun 2023 00:16:51 +0000 (17:16 -0700)
Pick up the driver cleanups identified in preparation for CXL "type-2"
(accelerator) device support. From a conflict generation perspective,
the major change here is the split of 'struct cxl_memdev_state' out of
the core 'struct cxl_dev_state', since an accelerator may not care
about all the optional features that are standard on a CXL "type-3"
(host-only memory expander) device.
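
In rough outline, the split looks like this (a condensed sketch of the
cxlmem.h hunks below, with most fields elided; see the diff for the
full definitions):

    /* minimal core state, shared by type-2 and type-3 drivers */
    struct cxl_dev_state {
            struct device *dev;
            struct cxl_memdev *cxlmd;
            enum cxl_devtype type;
            /* ... */
    };

    /* type-3 (memory expander) specifics wrap the core state */
    struct cxl_memdev_state {
            struct cxl_dev_state cxlds;
            size_t payload_size;
            struct cxl_security_state security;
            struct cxl_fw_state fw;
            /* ... */
    };

    static inline struct cxl_memdev_state *
    to_cxl_memdev_state(struct cxl_dev_state *cxlds)
    {
            if (cxlds->type != CXL_DEVTYPE_CLASSMEM)
                    return NULL;
            return container_of(cxlds, struct cxl_memdev_state, cxlds);
    }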

A silent conflict also occurs with the move of the endpoint port to be a
formal property of a 'struct cxl_memdev' rather than drvdata.
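
The shape of that conflict is simply (illustrative, mirroring the
memdev.c hunks below):

    /* before: the endpoint port was stashed as drvdata */
    struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);

    /* after: the endpoint port is a first-class member */
    struct cxl_port *port = cxlmd->endpoint;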

drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/pci.c
drivers/cxl/security.c
tools/testing/cxl/test/mem.c

index 31b1ac4c206d0c56467c5fefac2376c58c704f65,1990a5940b7c102648e660fbbfb1b9e2f012a08e..d6d067fbee970e49670e92746145566ed6925332
@@@ -1075,65 -1080,6 +1081,66 @@@ int cxl_dev_state_identify(struct cxl_m
  }
  EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
  
-  * @cxlds: The device data for the operation
 +/**
 + * cxl_mem_sanitize() - Send a sanitization command to the device.
- int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd)
++ * @mds: The device data for the operation
 + * @cmd: The specific sanitization command opcode
 + *
 + * Return: 0 if the command was executed successfully, regardless of
 + * whether or not the actual security operation is done in the background,
 + * such as for the Sanitize case.
 + * Error return values can be the result of the mailbox command, or
 + * -EINVAL when security requirements are not met or the context is
 + * invalid.
 + *
 + * See CXL 3.0 sections 8.2.9.8.5.1 Sanitize and 8.2.9.8.5.2 Secure Erase.
 + */
-       rc = cxl_internal_send_cmd(cxlds, &sec_cmd);
++int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
 +{
 +      int rc;
 +      u32 sec_out = 0;
 +      struct cxl_get_security_output {
 +              __le32 flags;
 +      } out;
 +      struct cxl_mbox_cmd sec_cmd = {
 +              .opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
 +              .payload_out = &out,
 +              .size_out = sizeof(out),
 +      };
 +      struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +
 +      if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
 +              return -EINVAL;
 +
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++      rc = cxl_internal_send_cmd(mds, &sec_cmd);
 +      if (rc < 0) {
 +              dev_err(cxlds->dev, "Failed to get security state : %d", rc);
 +              return rc;
 +      }
 +
 +      /*
 +       * Prior to using these commands, any security applied to
 +       * the user data areas of the device shall be DISABLED (or
 +       * UNLOCKED for secure erase case).
 +       */
 +      sec_out = le32_to_cpu(out.flags);
 +      if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
 +              return -EINVAL;
 +
 +      if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
 +          sec_out & CXL_PMEM_SEC_STATE_LOCKED)
 +              return -EINVAL;
 +
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      if (rc < 0) {
 +              dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
 +              return rc;
 +      }
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
 +
  static int add_dpa_res(struct device *dev, struct resource *parent,
                       struct resource *res, resource_size_t start,
                       resource_size_t size, const char *type)
index fd2e6b0f79c038daf927e87862845d73dafa5295,65a685e5616f78e0d52abc8d02ec9131ecbb4e27..90237b9487a761c7e22f1073c7a4244741210ba4
@@@ -109,88 -116,6 +118,89 @@@ static ssize_t numa_node_show(struct de
  }
  static DEVICE_ATTR_RO(numa_node);
  
-       unsigned long state = cxlds->security.state;
 +static ssize_t security_state_show(struct device *dev,
 +                                 struct device_attribute *attr,
 +                                 char *buf)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 +      struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
-       ssize_t rc;
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 +      u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +      u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
 +      u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
++      unsigned long state = mds->security.state;
 +
 +      if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
 +              return sysfs_emit(buf, "sanitize\n");
 +
 +      if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
 +              return sysfs_emit(buf, "disabled\n");
 +      if (state & CXL_PMEM_SEC_STATE_FROZEN ||
 +          state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
 +          state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
 +              return sysfs_emit(buf, "frozen\n");
 +      if (state & CXL_PMEM_SEC_STATE_LOCKED)
 +              return sysfs_emit(buf, "locked\n");
 +      else
 +              return sysfs_emit(buf, "unlocked\n");
 +}
 +static struct device_attribute dev_attr_security_state =
 +      __ATTR(state, 0444, security_state_show, NULL);
 +
 +static ssize_t security_sanitize_store(struct device *dev,
 +                                     struct device_attribute *attr,
 +                                     const char *buf, size_t len)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SANITIZE);
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++      struct cxl_port *port = cxlmd->endpoint;
 +      bool sanitize;
++      ssize_t rc;
 +
 +      if (kstrtobool(buf, &sanitize) || !sanitize)
 +              return -EINVAL;
 +
 +      if (!port || !is_cxl_endpoint(port))
 +              return -EINVAL;
 +
 +      /* ensure no regions are mapped to this memdev */
 +      if (port->commit_end != -1)
 +              return -EBUSY;
 +
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_port *port = dev_get_drvdata(&cxlmd->dev);
++      rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
 +
 +      return rc ? rc : len;
 +}
 +static struct device_attribute dev_attr_security_sanitize =
 +      __ATTR(sanitize, 0200, NULL, security_sanitize_store);
 +
 +static ssize_t security_erase_store(struct device *dev,
 +                                  struct device_attribute *attr,
 +                                  const char *buf, size_t len)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       rc = cxl_mem_sanitize(cxlds, CXL_MBOX_OP_SECURE_ERASE);
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
++      struct cxl_port *port = cxlmd->endpoint;
 +      ssize_t rc;
 +      bool erase;
 +
 +      if (kstrtobool(buf, &erase) || !erase)
 +              return -EINVAL;
 +
 +      if (!port || !is_cxl_endpoint(port))
 +              return -EINVAL;
 +
 +      /* ensure no regions are mapped to this memdev */
 +      if (port->commit_end != -1)
 +              return -EBUSY;
 +
++      rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
 +
 +      return rc ? rc : len;
 +}
 +static struct device_attribute dev_attr_security_erase =
 +      __ATTR(erase, 0200, NULL, security_erase_store);
 +
  static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
  {
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@@ -524,15 -439,6 +537,15 @@@ void clear_exclusive_cxl_commands(struc
  }
  EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
  
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
 +static void cxl_memdev_security_shutdown(struct device *dev)
 +{
 +      struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       if (cxlds->security.poll)
-               cancel_delayed_work_sync(&cxlds->security.poll_dwork);
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
 +
++      if (mds->security.poll)
++              cancel_delayed_work_sync(&mds->security.poll_dwork);
 +}
 +
  static void cxl_memdev_shutdown(struct device *dev)
  {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
@@@ -649,313 -556,6 +664,316 @@@ static int cxl_memdev_release_file(stru
        return 0;
  }
  
- static int cxl_mem_get_fw_info(struct cxl_dev_state *cxlds)
 +/**
 + * cxl_mem_get_fw_info - Get Firmware info
 + * @cxlds: The device data for the operation
 + *
 + * Retrieve firmware info for the device specified.
 + *
 + * Return: 0 if no error, or the result of the mailbox command.
 + *
 + * See CXL-3.0 8.2.9.3.1 Get FW Info
 + */
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
 +{
 +      struct cxl_mbox_get_fw_info info;
 +      struct cxl_mbox_cmd mbox_cmd;
 +      int rc;
 +
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_GET_FW_INFO,
 +              .size_out = sizeof(info),
 +              .payload_out = &info,
 +      };
 +
-       cxlds->fw.num_slots = info.num_slots;
-       cxlds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      if (rc < 0)
 +              return rc;
 +
-  * @cxlds: The device data for the operation
++      mds->fw.num_slots = info.num_slots;
++      mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
 +                                     info.slot_info);
 +
 +      return 0;
 +}
 +
 +/**
 + * cxl_mem_activate_fw - Activate Firmware
- static int cxl_mem_activate_fw(struct cxl_dev_state *cxlds, int slot)
++ * @mds: The device data for the operation
 + * @slot: slot number to activate
 + *
 + * Activate firmware in a given slot for the device specified.
 + *
 + * Return: 0 if no error, or the result of the mailbox command.
 + *
 + * See CXL-3.0 8.2.9.3.3 Activate FW
 + */
-       if (slot == 0 || slot > cxlds->fw.num_slots)
++static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
 +{
 +      struct cxl_mbox_activate_fw activate;
 +      struct cxl_mbox_cmd mbox_cmd;
 +
-       return cxl_internal_send_cmd(cxlds, &mbox_cmd);
++      if (slot == 0 || slot > mds->fw.num_slots)
 +              return -EINVAL;
 +
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_ACTIVATE_FW,
 +              .size_in = sizeof(activate),
 +              .payload_in = &activate,
 +      };
 +
 +      /* Only offline activation supported for now */
 +      activate.action = CXL_FW_ACTIVATE_OFFLINE;
 +      activate.slot = slot;
 +
-  * @cxlds: The device data for the operation
++      return cxl_internal_send_cmd(mds, &mbox_cmd);
 +}
 +
 +/**
 + * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
- static int cxl_mem_abort_fw_xfer(struct cxl_dev_state *cxlds)
++ * @mds: The device data for the operation
 + *
 + * Abort an in-progress firmware transfer for the device specified.
 + *
 + * Return: 0 if no error, or the result of the mailbox command.
 + *
 + * See CXL-3.0 8.2.9.3.2 Transfer FW
 + */
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
 +{
 +      struct cxl_mbox_transfer_fw *transfer;
 +      struct cxl_mbox_cmd mbox_cmd;
 +      int rc;
 +
 +      transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
 +      if (!transfer)
 +              return -ENOMEM;
 +
 +      /* Set a 1s poll interval and a total wait time of 30s */
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_TRANSFER_FW,
 +              .size_in = sizeof(*transfer),
 +              .payload_in = transfer,
 +              .poll_interval_ms = 1000,
 +              .poll_count = 30,
 +      };
 +
 +      transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;
 +
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      kfree(transfer);
 +      return rc;
 +}
 +
 +static void cxl_fw_cleanup(struct fw_upload *fwl)
 +{
-       cxlds->fw.next_slot = 0;
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      mds->fw.next_slot = 0;
 +}
 +
 +static int cxl_fw_do_cancel(struct fw_upload *fwl)
 +{
-       rc = cxl_mem_abort_fw_xfer(cxlds);
++      struct cxl_memdev_state *mds = fwl->dd_handle;
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +      struct cxl_memdev *cxlmd = cxlds->cxlmd;
 +      int rc;
 +
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      rc = cxl_mem_abort_fw_xfer(mds);
 +      if (rc < 0)
 +              dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);
 +
 +      return FW_UPLOAD_ERR_CANCELED;
 +}
 +
 +static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
 +                                       u32 size)
 +{
-       cxlds->fw.oneshot = struct_size(transfer, data, size) <
-                           cxlds->payload_size;
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +      struct cxl_mbox_transfer_fw *transfer;
 +
 +      if (!size)
 +              return FW_UPLOAD_ERR_INVALID_SIZE;
 +
-       if (cxl_mem_get_fw_info(cxlds))
++      mds->fw.oneshot = struct_size(transfer, data, size) <
++                          mds->payload_size;
 +
-       if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state))
++      if (cxl_mem_get_fw_info(mds))
 +              return FW_UPLOAD_ERR_HW_ERROR;
 +
 +      /*
 +       * So far no state has been changed, hence no other cleanup is
 +       * necessary. Simply return the cancelled status.
 +       */
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 +              return FW_UPLOAD_ERR_CANCELED;
 +
 +      return FW_UPLOAD_ERR_NONE;
 +}
 +
 +static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
 +                                     u32 offset, u32 size, u32 *written)
 +{
-        * Pick transfer size based on cxlds->payload_size
-        * @size must bw 128-byte aligned, ->payload_size is a power of 2
-        * starting at 256 bytes, and sizeof(*transfer) is 128.
-        * These constraints imply that @cur_size will always be 128b aligned.
++      struct cxl_memdev_state *mds = fwl->dd_handle;
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +      struct cxl_memdev *cxlmd = cxlds->cxlmd;
 +      struct cxl_mbox_transfer_fw *transfer;
 +      struct cxl_mbox_cmd mbox_cmd;
 +      u32 cur_size, remaining;
 +      size_t size_in;
 +      int rc;
 +
 +      *written = 0;
 +
 +      /* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
 +      if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
 +              dev_err(&cxlmd->dev,
 +                      "misaligned offset for FW transfer slice (%u)\n",
 +                      offset);
 +              return FW_UPLOAD_ERR_RW_ERROR;
 +      }
 +
 +      /*
-       cur_size = min_t(size_t, size, cxlds->payload_size - sizeof(*transfer));
++       * Pick transfer size based on mds->payload_size. @size must be
++       * 128-byte aligned, ->payload_size is a power of 2 starting at
++       * 256 bytes, and sizeof(*transfer) is 128.  These constraints
++       * imply that @cur_size will always be 128-byte aligned.
 +       */
-       if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state))
++      cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer));
 +
 +      remaining = size - cur_size;
 +      size_in = struct_size(transfer, data, cur_size);
 +
-       cxlds->fw.next_slot = (cxlds->fw.cur_slot % cxlds->fw.num_slots) + 1;
++      if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 +              return cxl_fw_do_cancel(fwl);
 +
 +      /*
 +       * Slot numbers are 1-indexed
 +       * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1')
 +       * Check for rollover using modulo, and 1-index it by adding 1
 +       */
-       if (cxlds->fw.oneshot) {
++      mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;
 +
 +      /* Do the transfer via mailbox cmd */
 +      transfer = kzalloc(size_in, GFP_KERNEL);
 +      if (!transfer)
 +              return FW_UPLOAD_ERR_RW_ERROR;
 +
 +      transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
 +      memcpy(transfer->data, data + offset, cur_size);
-               transfer->slot = cxlds->fw.next_slot;
++      if (mds->fw.oneshot) {
 +              transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
-                       transfer->slot = cxlds->fw.next_slot;
++              transfer->slot = mds->fw.next_slot;
 +      } else {
 +              if (offset == 0) {
 +                      transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
 +              } else if (remaining == 0) {
 +                      transfer->action = CXL_FW_TRANSFER_ACTION_END;
-       rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
++                      transfer->slot = mds->fw.next_slot;
 +              } else {
 +                      transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
 +              }
 +      }
 +
 +      mbox_cmd = (struct cxl_mbox_cmd) {
 +              .opcode = CXL_MBOX_OP_TRANSFER_FW,
 +              .size_in = size_in,
 +              .payload_in = transfer,
 +              .poll_interval_ms = 1000,
 +              .poll_count = 30,
 +      };
 +
-       if (cxlds->fw.oneshot || remaining == 0) {
++      rc = cxl_internal_send_cmd(mds, &mbox_cmd);
 +      if (rc < 0) {
 +              rc = FW_UPLOAD_ERR_RW_ERROR;
 +              goto out_free;
 +      }
 +
 +      *written = cur_size;
 +
 +      /* Activate FW if oneshot or if the last slice was written */
-                       cxlds->fw.next_slot);
-               rc = cxl_mem_activate_fw(cxlds, cxlds->fw.next_slot);
++      if (mds->fw.oneshot || remaining == 0) {
 +              dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++                      mds->fw.next_slot);
++              rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
 +              if (rc < 0) {
 +                      dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
 +                              rc);
 +                      rc = FW_UPLOAD_ERR_HW_ERROR;
 +                      goto out_free;
 +              }
 +      }
 +
 +      rc = FW_UPLOAD_ERR_NONE;
 +
 +out_free:
 +      kfree(transfer);
 +      return rc;
 +}
 +
 +static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
 +{
-       if (test_and_clear_bit(CXL_FW_CANCEL, cxlds->fw.state))
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +
 +      /*
 +       * cxl_internal_send_cmd() handles background operations synchronously.
 +       * No need to wait for completions here - any errors would've been
 +       * reported and handled during the ->write() call(s).
 +       * Just check if a cancel request was received, and return success.
 +       */
-       struct cxl_dev_state *cxlds = fwl->dd_handle;
++      if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
 +              return cxl_fw_do_cancel(fwl);
 +
 +      return FW_UPLOAD_ERR_NONE;
 +}
 +
 +static void cxl_fw_cancel(struct fw_upload *fwl)
 +{
-       set_bit(CXL_FW_CANCEL, cxlds->fw.state);
++      struct cxl_memdev_state *mds = fwl->dd_handle;
 +
- int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds)
++      set_bit(CXL_FW_CANCEL, mds->fw.state);
 +}
 +
 +static const struct fw_upload_ops cxl_memdev_fw_ops = {
 +      .prepare = cxl_fw_prepare,
 +      .write = cxl_fw_write,
 +      .poll_complete = cxl_fw_poll_complete,
 +      .cancel = cxl_fw_cancel,
 +      .cleanup = cxl_fw_cleanup,
 +};
 +
 +static void devm_cxl_remove_fw_upload(void *fwl)
 +{
 +      firmware_upload_unregister(fwl);
 +}
 +
-       if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxlds->enabled_cmds))
++int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
 +{
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +      struct device *dev = &cxlds->cxlmd->dev;
 +      struct fw_upload *fwl;
 +      int rc;
 +
-                                      &cxl_memdev_fw_ops, cxlds);
++      if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
 +              return 0;
 +
 +      fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
++                                     &cxl_memdev_fw_ops, mds);
 +      if (IS_ERR(fwl))
 +              return dev_err_probe(dev, PTR_ERR(fwl),
 +                                   "Failed to register firmware loader\n");
 +
 +      rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
 +                                    fwl);
 +      if (rc)
 +              dev_err(dev,
 +                      "Failed to add firmware loader remove action: %d\n",
 +                      rc);
 +
 +      return rc;
 +}
 +EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
 +
  static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
        .llseek = noop_llseek,
  };
  
-       struct cxl_dev_state *cxlds = data;
 +static void put_sanitize(void *data)
 +{
-       sysfs_put(cxlds->security.sanitize_node);
++      struct cxl_memdev_state *mds = data;
 +
-       cxlds->security.sanitize_node = sysfs_get_dirent(sec, "state");
++      sysfs_put(mds->security.sanitize_node);
 +}
 +
 +static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
 +{
 +      struct cxl_dev_state *cxlds = cxlmd->cxlds;
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 +      struct device *dev = &cxlmd->dev;
 +      struct kernfs_node *sec;
 +
 +      sec = sysfs_get_dirent(dev->kobj.sd, "security");
 +      if (!sec) {
 +              dev_err(dev, "sysfs_get_dirent 'security' failed\n");
 +              return -ENODEV;
 +      }
-       if (!cxlds->security.sanitize_node) {
++      mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
 +      sysfs_put(sec);
-       return devm_add_action_or_reset(cxlds->dev, put_sanitize, cxlds);
++      if (!mds->security.sanitize_node) {
 +              dev_err(dev, "sysfs_get_dirent 'state' failed\n");
 +              return -ENODEV;
 +      }
 +
++      return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
 +}
 +
  struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
  {
        struct cxl_memdev *cxlmd;
Simple merge
index ce6f085e5ba87ae589900fab09565a0b957a4509,9aa8876a4eeac8b563ed0a48e8fac984d2a08162..25234a491371c094241d0d0b9ed0571661e260fc
@@@ -83,7 -84,6 +85,8 @@@ static inline bool is_cxl_endpoint(stru
  }
  
  struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
- int cxl_memdev_setup_fw_upload(struct cxl_dev_state *cxlds);
++struct cxl_memdev_state;
++int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
  int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                         resource_size_t base, resource_size_t len,
                         resource_size_t skipped);
@@@ -261,101 -256,20 +264,115 @@@ struct cxl_poison_state 
        struct mutex lock;  /* Protect reads of poison list */
  };
  
 +/*
 + * Get FW Info
 + * CXL rev 3.0 section 8.2.9.3.1; Table 8-56
 + */
 +struct cxl_mbox_get_fw_info {
 +      u8 num_slots;
 +      u8 slot_info;
 +      u8 activation_cap;
 +      u8 reserved[13];
 +      char slot_1_revision[16];
 +      char slot_2_revision[16];
 +      char slot_3_revision[16];
 +      char slot_4_revision[16];
 +} __packed;
 +
 +#define CXL_FW_INFO_SLOT_INFO_CUR_MASK                        GENMASK(2, 0)
 +#define CXL_FW_INFO_SLOT_INFO_NEXT_MASK                       GENMASK(5, 3)
 +#define CXL_FW_INFO_SLOT_INFO_NEXT_SHIFT              3
 +#define CXL_FW_INFO_ACTIVATION_CAP_HAS_LIVE_ACTIVATE  BIT(0)
 +
 +/*
 + * Transfer FW Input Payload
 + * CXL rev 3.0 section 8.2.9.3.2; Table 8-57
 + */
 +struct cxl_mbox_transfer_fw {
 +      u8 action;
 +      u8 slot;
 +      u8 reserved[2];
 +      __le32 offset;
 +      u8 reserved2[0x78];
 +      u8 data[];
 +} __packed;
 +
 +#define CXL_FW_TRANSFER_ACTION_FULL   0x0
 +#define CXL_FW_TRANSFER_ACTION_INITIATE       0x1
 +#define CXL_FW_TRANSFER_ACTION_CONTINUE       0x2
 +#define CXL_FW_TRANSFER_ACTION_END    0x3
 +#define CXL_FW_TRANSFER_ACTION_ABORT  0x4
 +
 +/*
 + * CXL rev 3.0 section 8.2.9.3.2 mandates 128-byte alignment for FW packages
 + * and for each part transferred in a Transfer FW command.
 + */
 +#define CXL_FW_TRANSFER_ALIGNMENT     128
 +
 +/*
 + * Activate FW Input Payload
 + * CXL rev 3.0 section 8.2.9.3.3; Table 8-58
 + */
 +struct cxl_mbox_activate_fw {
 +      u8 action;
 +      u8 slot;
 +} __packed;
 +
 +#define CXL_FW_ACTIVATE_ONLINE                0x0
 +#define CXL_FW_ACTIVATE_OFFLINE               0x1
 +
 +/* FW state bits */
 +#define CXL_FW_STATE_BITS             32
 +#define CXL_FW_CANCEL         BIT(0)
 +
 +/**
 + * struct cxl_fw_state - Firmware upload / activation state
 + *
 + * @state: fw_uploader state bitmask
 + * @oneshot: whether the fw upload fits in a single transfer
 + * @num_slots: Number of FW slots available
 + * @cur_slot: Slot number currently active
 + * @next_slot: Slot number for the new firmware
 + */
 +struct cxl_fw_state {
 +      DECLARE_BITMAP(state, CXL_FW_STATE_BITS);
 +      bool oneshot;
 +      int num_slots;
 +      int cur_slot;
 +      int next_slot;
 +};
 +
 +/**
 + * struct cxl_security_state - Device security state
 + *
 + * @state: state of last security operation
 + * @poll: polling for sanitization is enabled, device has no mbox irq support
 + * @poll_tmo_secs: polling timeout
 + * @poll_dwork: polling work item
 + * @sanitize_node: sanitization sysfs file to notify
 + */
 +struct cxl_security_state {
 +      unsigned long state;
 +      bool poll;
 +      int poll_tmo_secs;
 +      struct delayed_work poll_dwork;
 +      struct kernfs_node *sanitize_node;
 +};
 +
+ /*
+  * enum cxl_devtype - delineate type-2 from a generic type-3 device
+  * @CXL_DEVTYPE_DEVMEM - Vendor specific CXL Type-2 device implementing HDM-D or
+  *                     HDM-DB, no requirement that this device implements a
+  *                     mailbox, or other memory-device-standard manageability
+  *                     flows.
+  * @CXL_DEVTYPE_CLASSMEM - Common class definition of a CXL Type-3 device with
+  *                       HDM-H and class-mandatory memory device registers
+  */
+ enum cxl_devtype {
+       CXL_DEVTYPE_DEVMEM,
+       CXL_DEVTYPE_CLASSMEM,
+ };
  /**
   * struct cxl_dev_state - The driver device state
   *
@@@ -388,26 -329,15 +432,16 @@@ struct cxl_dev_state 
   * @active_persistent_bytes: sum of hard + soft persistent
   * @next_volatile_bytes: volatile capacity change pending device reset
   * @next_persistent_bytes: persistent capacity change pending device reset
-  * @component_reg_phys: register base of component registers
-  * @info: Cached DVSEC information about the device.
-  * @serial: PCIe Device Serial Number
   * @event: event log driver state
   * @poison: poison driver state info
 + * @fw: firmware upload / activation state
   * @mbox_send: @dev specific transport for transmitting mailbox commands
   *
-  * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
+  * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
   * details on capacity parameters.
   */
- struct cxl_dev_state {
-       struct device *dev;
-       struct cxl_memdev *cxlmd;
-       struct cxl_regs regs;
-       int cxl_dvsec;
-       bool rcd;
-       bool media_ready;
+ struct cxl_memdev_state {
+       struct cxl_dev_state cxlds;
        size_t payload_size;
        size_t lsa_size;
        struct mutex mbox_mutex; /* Protects device mailbox and firmware */
        u64 active_persistent_bytes;
        u64 next_volatile_bytes;
        u64 next_persistent_bytes;
-       resource_size_t component_reg_phys;
-       u64 serial;
        struct cxl_event_state event;
        struct cxl_poison_state poison;
-       int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
 +      struct cxl_security_state security;
 +      struct cxl_fw_state fw;
 +
 +      struct rcuwait mbox_wait;
+       int (*mbox_send)(struct cxl_memdev_state *mds,
+                        struct cxl_mbox_cmd *cmd);
  };
  
+ static inline struct cxl_memdev_state *
+ to_cxl_memdev_state(struct cxl_dev_state *cxlds)
+ {
+       if (cxlds->type != CXL_DEVTYPE_CLASSMEM)
+               return NULL;
+       return container_of(cxlds, struct cxl_memdev_state, cxlds);
+ }
  enum cxl_opcode {
        CXL_MBOX_OP_INVALID             = 0x0000,
        CXL_MBOX_OP_RAW                 = CXL_MBOX_OP_INVALID,
@@@ -831,8 -756,6 +867,8 @@@ static inline void cxl_mem_active_dec(v
  }
  #endif
  
- int cxl_mem_sanitize(struct cxl_dev_state *cxlds, u16 cmd);
++int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
 +
  struct cxl_hdm {
        struct cxl_component_regs regs;
        unsigned int decoder_count;
index 4468f53ba5a89b1074cc198ea258b7a5d076162c,3f78082014cce9de37cc5fd3f3c03aef4aed9ef3..18cfb7ae17a3782ff783a031e21d59dd30eb0fec
@@@ -84,92 -84,9 +84,92 @@@ static int cxl_pci_mbox_wait_for_doorbe
                            status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
                            status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
  
-               if (cxlds->security.sanitize_node)
-                       sysfs_notify_dirent(cxlds->security.sanitize_node);
 +struct cxl_dev_id {
 +      struct cxl_dev_state *cxlds;
 +};
 +
 +static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
 +                         irq_handler_t handler, irq_handler_t thread_fn)
 +{
 +      struct device *dev = cxlds->dev;
 +      struct cxl_dev_id *dev_id;
 +
 +      /* dev_id must be globally unique and must contain the cxlds */
 +      dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
 +      if (!dev_id)
 +              return -ENOMEM;
 +      dev_id->cxlds = cxlds;
 +
 +      return devm_request_threaded_irq(dev, irq, handler, thread_fn,
 +                                       IRQF_SHARED | IRQF_ONESHOT,
 +                                       NULL, dev_id);
 +}
 +
 +static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
 +{
 +      u64 reg;
 +
 +      reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +      return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
 +}
 +
 +static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
 +{
 +      u64 reg;
 +      u16 opcode;
 +      struct cxl_dev_id *dev_id = id;
 +      struct cxl_dev_state *cxlds = dev_id->cxlds;
++      struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 +
 +      if (!cxl_mbox_background_complete(cxlds))
 +              return IRQ_NONE;
 +
 +      reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +      opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
 +      if (opcode == CXL_MBOX_OP_SANITIZE) {
-               rcuwait_wake_up(&cxlds->mbox_wait);
++              if (mds->security.sanitize_node)
++                      sysfs_notify_dirent(mds->security.sanitize_node);
 +
 +              dev_dbg(cxlds->dev, "Sanitization operation ended\n");
 +      } else {
 +              /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
-       struct cxl_dev_state *cxlds;
-       cxlds = container_of(work,
-                            struct cxl_dev_state, security.poll_dwork.work);
++              rcuwait_wake_up(&mds->mbox_wait);
 +      }
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/*
 + * Sanitization operation polling mode.
 + */
 +static void cxl_mbox_sanitize_work(struct work_struct *work)
 +{
-       mutex_lock(&cxlds->mbox_mutex);
++      struct cxl_memdev_state *mds =
++              container_of(work, typeof(*mds), security.poll_dwork.work);
++      struct cxl_dev_state *cxlds = &mds->cxlds;
 +
-               cxlds->security.poll_tmo_secs = 0;
++      mutex_lock(&mds->mbox_mutex);
 +      if (cxl_mbox_background_complete(cxlds)) {
-               if (cxlds->security.sanitize_node)
-                       sysfs_notify_dirent(cxlds->security.sanitize_node);
++              mds->security.poll_tmo_secs = 0;
 +              put_device(cxlds->dev);
 +
-               int timeout = cxlds->security.poll_tmo_secs + 10;
++              if (mds->security.sanitize_node)
++                      sysfs_notify_dirent(mds->security.sanitize_node);
 +
 +              dev_dbg(cxlds->dev, "Sanitization operation ended\n");
 +      } else {
-               cxlds->security.poll_tmo_secs = min(15 * 60, timeout);
-               queue_delayed_work(system_wq, &cxlds->security.poll_dwork,
++              int timeout = mds->security.poll_tmo_secs + 10;
 +
-       mutex_unlock(&cxlds->mbox_mutex);
++              mds->security.poll_tmo_secs = min(15 * 60, timeout);
++              queue_delayed_work(system_wq, &mds->security.poll_dwork,
 +                                 timeout * HZ);
 +      }
++      mutex_unlock(&mds->mbox_mutex);
 +}
 +
  /**
   * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
-  * @cxlds: The device state to communicate with.
+  * @mds: The memory device driver data
   * @mbox_cmd: Command to send to the memory device.
   *
   * Context: Any context. Expects mbox_mutex to be held.
@@@ -227,16 -145,6 +228,16 @@@ static int __cxl_pci_mbox_send_cmd(stru
                return -EBUSY;
        }
  
-       if (cxlds->security.poll_tmo_secs > 0) {
 +      /*
 +       * With sanitize polling, the hardware might be done while the poller
 +       * is still out of sync. Ensure no new command comes in until the
 +       * poller catches up; keep the hardware semantics and only allow the
 +       * device health status query through.
 +       */
++      if (mds->security.poll_tmo_secs > 0) {
 +              if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
 +                      return -EBUSY;
 +      }
 +
        cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
                             mbox_cmd->opcode);
        if (mbox_cmd->size_in) {
        mbox_cmd->return_code =
                FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
  
-                       if (cxlds->security.poll_tmo_secs != -1) {
 +      /*
 +       * Handle the background command in a synchronous manner.
 +       *
 +       * All other mailbox commands will serialize/queue on the mbox_mutex,
 +       * which we currently hold. Furthermore this also guarantees that
 +       * cxl_mbox_background_complete() checks are safe amongst each other,
 +       * in that no new bg operation can occur in between.
 +       *
 +       * Background operations are timesliced in accordance with the nature
 +       * of the command. In the event of timeout, the mailbox state is
 +       * indeterminate until the next successful command submission and the
 +       * driver can get back in sync with the hardware state.
 +       */
 +      if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
 +              u64 bg_status_reg;
 +              int i, timeout;
 +
 +              /*
 +               * Sanitization is a special case which monopolizes the device
 +               * and cannot be timesliced. Handle asynchronously instead,
 +               * and allow userspace to poll(2) for completion.
 +               */
 +              if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
-                               cxlds->security.poll_tmo_secs = timeout;
++                      if (mds->security.poll_tmo_secs != -1) {
 +                              /* hold the device throughout */
 +                              get_device(cxlds->dev);
 +
 +                              /* give first timeout a second */
 +                              timeout = 1;
-                                                  &cxlds->security.poll_dwork,
++                              mds->security.poll_tmo_secs = timeout;
 +                              queue_delayed_work(system_wq,
-                       if (rcuwait_wait_event_timeout(&cxlds->mbox_wait,
++                                                 &mds->security.poll_dwork,
 +                                                 timeout * HZ);
 +                      }
 +
 +                      dev_dbg(dev, "Sanitization operation started\n");
 +                      goto success;
 +              }
 +
 +              dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
 +                      mbox_cmd->opcode);
 +
 +              timeout = mbox_cmd->poll_interval_ms;
 +              for (i = 0; i < mbox_cmd->poll_count; i++) {
++                      if (rcuwait_wait_event_timeout(&mds->mbox_wait,
 +                                     cxl_mbox_background_complete(cxlds),
 +                                     TASK_UNINTERRUPTIBLE,
 +                                     msecs_to_jiffies(timeout)) > 0)
 +                              break;
 +              }
 +
 +              if (!cxl_mbox_background_complete(cxlds)) {
 +                      dev_err(dev, "timeout waiting for background (%d ms)\n",
 +                              timeout * mbox_cmd->poll_count);
 +                      return -ETIMEDOUT;
 +              }
 +
 +              bg_status_reg = readq(cxlds->regs.mbox +
 +                                    CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 +              mbox_cmd->return_code =
 +                      FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
 +                                bg_status_reg);
 +              dev_dbg(dev,
 +                      "Mailbox background operation (0x%04x) completed\n",
 +                      mbox_cmd->opcode);
 +      }
 +
        if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
                dev_dbg(dev, "Mailbox operation had an error: %s\n",
                        cxl_mbox_cmd_rc2str(mbox_cmd));
@@@ -429,37 -272,8 +433,36 @@@ static int cxl_pci_setup_mailbox(struc
                return -ENXIO;
        }
  
-       dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
-               cxlds->payload_size);
+       dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
  
-       rcuwait_init(&cxlds->mbox_wait);
++      rcuwait_init(&mds->mbox_wait);
 +
 +      if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
 +              u32 ctrl;
 +              int irq, msgnum;
 +              struct pci_dev *pdev = to_pci_dev(cxlds->dev);
 +
 +              msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
 +              irq = pci_irq_vector(pdev, msgnum);
 +              if (irq < 0)
 +                      goto mbox_poll;
 +
 +              if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
 +                      goto mbox_poll;
 +
 +              /* enable background command mbox irq support */
 +              ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 +              ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
 +              writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 +
 +              return 0;
 +      }
 +
 +mbox_poll:
-       cxlds->security.poll = true;
-       INIT_DELAYED_WORK(&cxlds->security.poll_dwork, cxl_mbox_sanitize_work);
++      mds->security.poll = true;
++      INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 +
 +      dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
        return 0;
  }
  
@@@ -694,10 -507,12 +684,10 @@@ static int cxl_event_req_irq(struct cxl
        if (irq < 0)
                return irq;
  
 -      return devm_request_threaded_irq(dev, irq, NULL, cxl_event_thread,
 -                                       IRQF_SHARED | IRQF_ONESHOT, NULL,
 -                                       dev_id);
 +      return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
  }
  
- static int cxl_event_get_int_policy(struct cxl_dev_state *cxlds,
+ static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
                                    struct cxl_event_interrupt_policy *policy)
  {
        struct cxl_mbox_cmd mbox_cmd = {
@@@ -889,11 -708,7 +883,11 @@@ static int cxl_pci_probe(struct pci_de
        else
                dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
  
-       rc = cxl_pci_setup_mailbox(cxlds);
 +      rc = cxl_alloc_irq_vectors(pdev);
 +      if (rc)
 +              return rc;
 +
+       rc = cxl_pci_setup_mailbox(mds);
        if (rc)
                return rc;
  
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
  
-       rc = cxl_memdev_setup_fw_upload(cxlds);
++      rc = cxl_memdev_setup_fw_upload(mds);
 +      if (rc)
 +              return rc;
 +
-       rc = cxl_event_config(host_bridge, cxlds);
+       rc = cxl_event_config(host_bridge, mds);
        if (rc)
                return rc;
  
index 9da6785dfd315e5fcf16e66ce08ad08ec78eb714,8c98fc674fa761d77b190ffca6f08cd1499b1ece..21856a3f408eee530c69da2748e7f1a0c311d9d8
@@@ -34,9 -34,6 +34,9 @@@ static unsigned long cxl_pmem_get_secur
                return 0;
  
        sec_out = le32_to_cpu(out.flags);
-       cxlds->security.state = sec_out;
 +      /* cache security state */
++      mds->security.state = sec_out;
 +
        if (ptype == NVDIMM_MASTER) {
                if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)
                        set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
index 420f01106b52c52e34fc2a08f23706f5bc90ddb4,6fb5718588f372387dfe7ad3cc1006a21072f1a7..464fc39ed2776b5ea1f89d2e82b7d072fd21424c
@@@ -570,57 -532,9 +567,52 @@@ static int mock_partition_info(struct c
        return 0;
  }
  
- static int mock_sanitize(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
++static int mock_sanitize(struct cxl_mockmem_data *mdata,
++                       struct cxl_mbox_cmd *cmd)
 +{
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      if (cmd->size_in != 0)
 +              return -EINVAL;
 +
 +      if (cmd->size_out != 0)
 +              return -EINVAL;
 +
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +
 +      return 0; /* assume less than 2 secs, no bg */
 +}
 +
- static int mock_secure_erase(struct cxl_dev_state *cxlds,
++static int mock_secure_erase(struct cxl_mockmem_data *mdata,
 +                           struct cxl_mbox_cmd *cmd)
 +{
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      if (cmd->size_in != 0)
 +              return -EINVAL;
 +
 +      if (cmd->size_out != 0)
 +              return -EINVAL;
 +
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +
 +      if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
 +              cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
 +              return -ENXIO;
 +      }
 +
 +      return 0;
 +}
 +
- static int mock_get_security_state(struct cxl_dev_state *cxlds,
+ static int mock_get_security_state(struct cxl_mockmem_data *mdata,
                                   struct cxl_mbox_cmd *cmd)
  {
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
        if (cmd->size_in)
                return -EINVAL;
  
@@@ -1195,90 -1105,12 +1183,90 @@@ static struct attribute *cxl_mock_mem_c
  };
  ATTRIBUTE_GROUPS(cxl_mock_mem_core);
  
- static int mock_fw_info(struct cxl_dev_state *cxlds,
-                           struct cxl_mbox_cmd *cmd)
++static int mock_fw_info(struct cxl_mockmem_data *mdata,
++                      struct cxl_mbox_cmd *cmd)
 +{
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      struct cxl_mbox_get_fw_info fw_info = {
 +              .num_slots = FW_SLOTS,
 +              .slot_info = (mdata->fw_slot & 0x7) |
 +                           ((mdata->fw_staged & 0x7) << 3),
 +              .activation_cap = 0,
 +      };
 +
 +      strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
 +      strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
 +      strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
 +      strcpy(fw_info.slot_4_revision, "");
 +
 +      if (cmd->size_out < sizeof(fw_info))
 +              return -EINVAL;
 +
 +      memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
 +      return 0;
 +}
 +
- static int mock_transfer_fw(struct cxl_dev_state *cxlds,
++static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
 +                          struct cxl_mbox_cmd *cmd)
 +{
 +      struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +      void *fw = mdata->fw;
 +      size_t offset, length;
 +
 +      offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
 +      length = cmd->size_in - sizeof(*transfer);
 +      if (offset + length > FW_SIZE)
 +              return -EINVAL;
 +
 +      switch (transfer->action) {
 +      case CXL_FW_TRANSFER_ACTION_FULL:
 +              if (offset != 0)
 +                      return -EINVAL;
 +              fallthrough;
 +      case CXL_FW_TRANSFER_ACTION_END:
 +              if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
 +                      return -EINVAL;
 +              mdata->fw_size = offset + length;
 +              break;
 +      case CXL_FW_TRANSFER_ACTION_INITIATE:
 +      case CXL_FW_TRANSFER_ACTION_CONTINUE:
 +              break;
 +      case CXL_FW_TRANSFER_ACTION_ABORT:
 +              return 0;
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      memcpy(fw + offset, transfer->data, length);
 +      return 0;
 +}
 +
- static int mock_activate_fw(struct cxl_dev_state *cxlds,
++static int mock_activate_fw(struct cxl_mockmem_data *mdata,
 +                          struct cxl_mbox_cmd *cmd)
 +{
 +      struct cxl_mbox_activate_fw *activate = cmd->payload_in;
-       struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
 +
 +      if (activate->slot == 0 || activate->slot > FW_SLOTS)
 +              return -EINVAL;
 +
 +      switch (activate->action) {
 +      case CXL_FW_ACTIVATE_ONLINE:
 +              mdata->fw_slot = activate->slot;
 +              mdata->fw_staged = 0;
 +              return 0;
 +      case CXL_FW_ACTIVATE_OFFLINE:
 +              mdata->fw_staged = activate->slot;
 +              return 0;
 +      }
 +
 +      return -EINVAL;
 +}
 +
- static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+ static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+                             struct cxl_mbox_cmd *cmd)
  {
+       struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = cxlds->dev;
+       struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
        int rc = -EIO;
  
        switch (cmd->opcode) {
                break;
        case CXL_MBOX_OP_IDENTIFY:
                if (cxlds->rcd)
-                       rc = mock_rcd_id(cxlds, cmd);
+                       rc = mock_rcd_id(cmd);
                else
-                       rc = mock_id(cxlds, cmd);
+                       rc = mock_id(cmd);
                break;
        case CXL_MBOX_OP_GET_LSA:
-               rc = mock_get_lsa(cxlds, cmd);
+               rc = mock_get_lsa(mdata, cmd);
                break;
        case CXL_MBOX_OP_GET_PARTITION_INFO:
-               rc = mock_partition_info(cxlds, cmd);
+               rc = mock_partition_info(cmd);
                break;
        case CXL_MBOX_OP_GET_EVENT_RECORD:
-               rc = mock_get_event(cxlds, cmd);
+               rc = mock_get_event(dev, cmd);
                break;
        case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
-               rc = mock_clear_event(cxlds, cmd);
+               rc = mock_clear_event(dev, cmd);
                break;
        case CXL_MBOX_OP_SET_LSA:
-               rc = mock_set_lsa(cxlds, cmd);
+               rc = mock_set_lsa(mdata, cmd);
                break;
        case CXL_MBOX_OP_GET_HEALTH_INFO:
-               rc = mock_health_info(cxlds, cmd);
+               rc = mock_health_info(cmd);
                break;
-               rc = mock_sanitize(cxlds, cmd);
 +      case CXL_MBOX_OP_SANITIZE:
-               rc = mock_secure_erase(cxlds, cmd);
++              rc = mock_sanitize(mdata, cmd);
 +              break;
 +      case CXL_MBOX_OP_SECURE_ERASE:
++              rc = mock_secure_erase(mdata, cmd);
 +              break;
        case CXL_MBOX_OP_GET_SECURITY_STATE:
-               rc = mock_get_security_state(cxlds, cmd);
+               rc = mock_get_security_state(mdata, cmd);
                break;
        case CXL_MBOX_OP_SET_PASSPHRASE:
-               rc = mock_set_passphrase(cxlds, cmd);
+               rc = mock_set_passphrase(mdata, cmd);
                break;
        case CXL_MBOX_OP_DISABLE_PASSPHRASE:
-               rc = mock_disable_passphrase(cxlds, cmd);
+               rc = mock_disable_passphrase(mdata, cmd);
                break;
        case CXL_MBOX_OP_FREEZE_SECURITY:
-               rc = mock_freeze_security(cxlds, cmd);
+               rc = mock_freeze_security(mdata, cmd);
                break;
        case CXL_MBOX_OP_UNLOCK:
-               rc = mock_unlock_security(cxlds, cmd);
+               rc = mock_unlock_security(mdata, cmd);
                break;
        case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
-               rc = mock_passphrase_secure_erase(cxlds, cmd);
+               rc = mock_passphrase_secure_erase(mdata, cmd);
                break;
        case CXL_MBOX_OP_GET_POISON:
                rc = mock_get_poison(cxlds, cmd);
        case CXL_MBOX_OP_CLEAR_POISON:
                rc = mock_clear_poison(cxlds, cmd);
                break;
-               rc = mock_fw_info(cxlds, cmd);
 +      case CXL_MBOX_OP_GET_FW_INFO:
-               rc = mock_transfer_fw(cxlds, cmd);
++              rc = mock_fw_info(mdata, cmd);
 +              break;
 +      case CXL_MBOX_OP_TRANSFER_FW:
-               rc = mock_activate_fw(cxlds, cmd);
++              rc = mock_transfer_fw(mdata, cmd);
 +              break;
 +      case CXL_MBOX_OP_ACTIVATE_FW:
++              rc = mock_activate_fw(mdata, cmd);
 +              break;
        default:
                break;
        }
@@@ -1418,18 -1226,16 +1407,20 @@@ static int cxl_mock_mem_probe(struct pl
        if (rc)
                return rc;
  
-       cxlds = cxl_dev_state_create(dev);
-       if (IS_ERR(cxlds))
-               return PTR_ERR(cxlds);
 +      rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
 +      if (rc)
 +              return rc;
 +
+       mds = cxl_memdev_state_create(dev);
+       if (IS_ERR(mds))
+               return PTR_ERR(mds);
+       mds->mbox_send = cxl_mock_mbox_send;
+       mds->payload_size = SZ_4K;
+       mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
  
+       cxlds = &mds->cxlds;
        cxlds->serial = pdev->id;
-       cxlds->mbox_send = cxl_mock_mbox_send;
-       cxlds->payload_size = SZ_4K;
-       cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
        if (is_rcd(pdev)) {
                cxlds->rcd = true;
                cxlds->component_reg_phys = CXL_RESOURCE_NONE;
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
  
-       rc = cxl_memdev_setup_fw_upload(cxlds);
++      rc = cxl_memdev_setup_fw_upload(mds);
 +      if (rc)
 +              return rc;
 +
-       cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+       cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
  
        return 0;
  }