cxl/hdm: Track next decoder to allocate

The CXL specification enforces that endpoint decoders are committed in
hw instance id order. In preparation for adding dynamic DPA allocation,
record the hw instance id in endpoint decoders, and enforce allocations
to occur in hw instance id order.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784328827.1758207.9627538529944559954.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Dan Williams 2022-05-24 12:04:58 -07:00
parent 2c8669033f
commit 0c33b39352
3 changed files with 18 additions and 0 deletions

View File

@@ -160,6 +160,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
 static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
 {
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+	struct cxl_port *port = cxled_to_port(cxled);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct resource *res = cxled->dpa_res;
 	resource_size_t skip_start;
@@ -173,6 +174,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
 	__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
 	cxled->skip = 0;
 	cxled->dpa_res = NULL;
+	port->hdm_end--;
 }

 static void cxl_dpa_release(void *cxled)
@@ -203,6 +205,18 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 		return -EBUSY;
 	}

+	if (port->hdm_end + 1 != cxled->cxld.id) {
+		/*
+		 * Assumes alloc and commit order is always in hardware instance
+		 * order per expectations from 8.2.5.12.20 Committing Decoder
+		 * Programming that enforce decoder[m] committed before
+		 * decoder[m+1] commit start.
+		 */
+		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+			cxled->cxld.id, port->id, port->hdm_end + 1);
+		return -EBUSY;
+	}
+
 	if (skipped) {
 		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
 				       dev_name(&cxled->cxld.dev), 0);
@@ -235,6 +249,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 			cxled->cxld.id, cxled->dpa_res);
 		cxled->mode = CXL_DECODER_MIXED;
 	}
+	port->hdm_end++;

 	return 0;
 }

View File

@@ -502,6 +502,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
 	port->component_reg_phys = component_reg_phys;
 	ida_init(&port->decoder_ida);
+	port->hdm_end = -1;
 	INIT_LIST_HEAD(&port->dports);
 	INIT_LIST_HEAD(&port->endpoints);

View File

@@ -333,6 +333,7 @@ struct cxl_nvdimm {
  * @dports: cxl_dport instances referenced by decoders
  * @endpoints: cxl_ep instances, endpoints that are a descendant of this port
  * @decoder_ida: allocator for decoder ids
+ * @hdm_end: track last allocated HDM decoder instance for allocation ordering
  * @component_reg_phys: component register capability base address (optional)
  * @dead: last ep has been removed, force port re-creation
  * @depth: How deep this port is relative to the root. depth 0 is the root.
@@ -347,6 +348,7 @@ struct cxl_port {
 	struct list_head dports;
 	struct list_head endpoints;
 	struct ida decoder_ida;
+	int hdm_end;
 	resource_size_t component_reg_phys;
 	bool dead;
 	unsigned int depth;