author     Dan Williams <dan.j.williams@intel.com>  2023-04-03 16:01:32 -0700
committer  Dan Williams <dan.j.williams@intel.com>  2023-04-04 15:34:34 -0700
commit     24b18197184ac39bb8566fb82c0bf788bcd0d45b (patch)
tree       6cd1946fc5af84f051cbb44d10bd7ef82c6f7fac
parent     52cc48ad2a76a5fe82d239044d67944bbb928de6 (diff)
cxl/hdm: Extend DVSEC range register emulation for region enumeration
One motivation for mapping range registers to decoder objects is to use
those settings for region autodiscovery.

The need to map a region for devices programmed to use range registers is
especially urgent now that the kernel no longer routes "Soft Reserved"
ranges in the memory map to device-dax by default. The CXL memory range
loses all access mechanisms.

Complete the implementation by marking the DPA reservation and setting the
endpoint-decoder state to signal autodiscovery. Note that the default
settings of ways=1 and granularity=4096 set in cxl_decode_init() do not
need to be updated.

Fixes: 09d09e04d2fc ("cxl/dax: Create dax devices for CXL RAM regions")
Tested-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Gregory Price <gregory.price@memverge.com>
Link: https://lore.kernel.org/r/168012575521.221280.14177293493678527326.stgit@dwillia2-xfh.jf.intel.com
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
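In short: while emulating HDM decoders from the DVSEC range registers, each
non-empty range now also claims its device-physical (DPA) span and flags the
decoder for autodiscovery. The sketch below is illustrative only and is not
part of the patch; it compresses the real enumeration flow
(devm_cxl_enumerate_decoders()/init_hdm_decoder()) into a single assumed loop
while reusing the kernel helpers the diff relies on
(cxl_endpoint_decoder_alloc(), range_len(), devm_cxl_dpa_reserve(),
CXL_DECODER_STATE_AUTO).

/*
 * Illustrative sketch only (not from the patch): how emulated DVSEC
 * range-register decoders become candidates for region autodiscovery.
 * The single loop is a simplification of the real enumeration path in
 * devm_cxl_enumerate_decoders()/init_hdm_decoder().
 */
static int sketch_emulate_dvsec_decoders(struct cxl_port *port,
					 struct cxl_endpoint_dvsec_info *info)
{
	u64 dpa_base = 0;	/* cumulative DPA cursor across range registers */
	int i, rc;

	for (i = 0; i < info->ranges; i++) {
		struct cxl_endpoint_decoder *cxled;
		u64 len = range_len(&info->dvsec_range[i]);

		if (!len)
			continue;

		cxled = cxl_endpoint_decoder_alloc(port);
		if (IS_ERR(cxled))
			return PTR_ERR(cxled);

		/* reserve the device-physical span backing this range */
		rc = devm_cxl_dpa_reserve(cxled, dpa_base, len, 0);
		if (rc)
			return rc;
		dpa_base += len;

		/* signal region autodiscovery to claim this decoder */
		cxled->state = CXL_DECODER_STATE_AUTO;
	}

	return 0;
}

Threading *dpa_base by pointer in the actual patch serves the same purpose as
the local cursor above: successive range registers are packed back-to-back
into device-physical address space.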
-rw-r--r--  drivers/cxl/core/hdm.c | 27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 9884b6d4d930d..02cc2c38b44ba 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -738,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
return 0;
}
-static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
- struct cxl_decoder *cxld, int which,
- struct cxl_endpoint_dvsec_info *info)
+static int cxl_setup_hdm_decoder_from_dvsec(
+ struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+ int which, struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_endpoint_decoder *cxled;
+ u64 len;
+ int rc;
+
if (!is_cxl_endpoint(port))
return -EOPNOTSUPP;
- if (!range_len(&info->dvsec_range[which]))
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ len = range_len(&info->dvsec_range[which]);
+ if (!len)
return -ENOENT;
cxld->target_type = CXL_DECODER_EXPANDER;
@@ -760,6 +766,16 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
port->commit_end = cxld->id;
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+ if (rc) {
+ dev_err(&port->dev,
+ "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+ port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+ return rc;
+ }
+ *dpa_base += len;
+ cxled->state = CXL_DECODER_STATE_AUTO;
+
return 0;
}
@@ -779,7 +795,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
} target_list;
if (should_emulate_decoders(info))
- return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
+ return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+ which, info);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));