Commit e812928be2ee for kernel
commit e812928be2ee1c2744adf20ed04e0ce1e2fc5c13
Merge: cebcffe666cc 63fbf275fa9f
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date: Thu Feb 12 16:33:05 2026 -0800
Merge tag 'cxl-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull CXL updates from Dave Jiang:
- Introduce cxl_memdev_attach and pave the way for soft-reserved handling,
type-2 accelerator enabling, and LSA 2.0 enabling. All of these series
require the endpoint driver to settle before the memdev driver probe
continues.
- Address CXL port error protocol handling and reporting.
The large patch series was split into three parts; the first two
parts are included here, with the final part to follow.
The first part is a series of refactorings to the PCI AER
sub-system code that addresses CXL, and to the CXL RAS code,
preparing for port error handling.
The second part refactors the CXL code to move management of
component registers to cxl_port objects, so that all CXL AER
errors can be handled through the cxl_port hierarchy.
- Provide AMD Zen5 platform address translation for CXL using ACPI
PRMT. This includes a conventions document to explain why this is
needed and how it's implemented.
- Miscellaneous CXL fixes, cleanups, and updates, including CXL
address translation for unaligned MOD3 regions (see the interleave
sketch after this message).
[ TLA service: CXL is "Compute Express Link" ]
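As background for the translation commits below: for an aligned
interleave set, reconstructing an HPA offset from a device's DPA
offset is plain round-robin arithmetic per the CXL spec. A minimal
standalone sketch (illustrative names, not the kernel's
cxl_calculate_hpa_offset()):

    #include <stdint.h>

    /*
     * Rebuild an HPA offset from a DPA offset for an aligned interleave
     * set: 'ways' devices at granularity 'gran' bytes, with this device
     * at position 'pos' in the set.
     */
    static uint64_t dpa_to_hpa_offset(uint64_t dpa_offset, unsigned int pos,
                                      unsigned int ways, uint64_t gran)
    {
            uint64_t line = dpa_offset / gran;  /* granule index on this device */
            uint64_t rem  = dpa_offset % gran;  /* byte within the granule */

            /*
             * Row 'line' of the interleave set starts at line * ways * gran
             * in HPA space; this device's granule sits 'pos' granules into
             * that row.
             */
            return line * ways * gran + (uint64_t)pos * gran + rem;
    }

The unaligned MOD3 work in this pull covers 3-way (and other MOD3)
interleave sets whose base is not aligned to hbiw * 256MB, where this
simple arithmetic no longer holds.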
* tag 'cxl-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (59 commits)
cxl: Disable HPA/SPA translation handlers for Normalized Addressing
cxl/region: Factor out code into cxl_region_setup_poison()
cxl/atl: Lock decoders that need address translation
cxl: Enable AMD Zen5 address translation using ACPI PRMT
cxl/acpi: Prepare use of EFI runtime services
cxl: Introduce callback for HPA address ranges translation
cxl/region: Use region data to get the root decoder
cxl/region: Add @hpa_range argument to function cxl_calc_interleave_pos()
cxl/region: Separate region parameter setup and region construction
cxl: Simplify cxl_root_ops allocation and handling
cxl/region: Store HPA range in struct cxl_region
cxl/region: Store root decoder in struct cxl_region
cxl/region: Rename misleading variable name @hpa to @hpa_range
Documentation/driver-api/cxl: ACPI PRM Address Translation Support and AMD Zen5 enablement
cxl, doc: Moving conventions in separate files
cxl, doc: Remove isonum.txt inclusion
cxl/port: Unify endpoint and switch port lookup
cxl/port: Move endpoint component register management to cxl_port
cxl/port: Map Port RAS registers
cxl/port: Move dport RAS setup to dport add time
...
diff --cc arch/x86/kernel/e820.c
index 97b54bd0f482,69c050f50e18..2a9992758933
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@@ -1171,36 -1146,31 +1171,42 @@@ __init static unsigned long ram_alignme
#define MAX_RESOURCE_SIZE ((resource_size_t)-1)
-void __init e820__reserve_resources_late(void)
+__init void e820__reserve_resources_late(void)
{
- u32 idx;
- int i;
-- struct resource *res;
-
+ /*
+ * Register device address regions listed in the E820 map,
+ * these can be claimed by device drivers later on:
+ */
- res = e820_res;
- for (idx = 0; idx < e820_table->nr_entries; idx++) {
- if (!res->parent && res->end)
++ for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
++ struct resource *res = e820_res + idx;
+
- for (i = 0, res = e820_res; i < e820_table->nr_entries; i++, res++) {
+ /* skip added or uninitialized resources */
+ if (res->parent || !res->end)
+ continue;
+
+ /* set aside soft-reserved resources for driver consideration */
+ if (res->desc == IORES_DESC_SOFT_RESERVED) {
+ insert_resource_expand_to_fit(&soft_reserve_resource, res);
+ } else {
+ /* publish the rest immediately */
insert_resource_expand_to_fit(&iomem_resource, res);
- res++;
+ }
}
/*
- * Try to bump up RAM regions to reasonable boundaries, to
- * avoid stolen RAM:
+ * Create additional 'gaps' at the end of RAM regions,
+ * rounding them up to 64k/1MB/64MB boundaries, should
+ * they be weirdly sized, and register extra, locked
+ * resource regions for them, to make sure drivers
+ * won't claim those addresses.
+ *
+ * These are basically blind guesses and heuristics to
+ * avoid resource conflicts with broken firmware that
+ * doesn't properly list 'stolen RAM' as a system region
+ * in the E820 map.
*/
- for (idx = 0; idx < e820_table->nr_entries; idx++) {
- for (i = 0; i < e820_table->nr_entries; i++) {
- struct e820_entry *entry = &e820_table->entries[i];
++ for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
+ struct e820_entry *entry = &e820_table->entries[idx];
u64 start, end;
if (entry->type != E820_TYPE_RAM)
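The 64k/1MB/64MB rounding described in the comment above comes from the
ram_alignment() helper named in the hunk header. A hedged standalone
sketch of that rounding, assuming the usual 1MB/16MB thresholds
(round_ram_end() and ram_alignment_for() are illustrative names, not
kernel symbols):

    #include <stdint.h>

    /* Alignment grows with position: 64k below 1MB, 1MB below 16MB, 64MB above */
    static uint64_t ram_alignment_for(uint64_t pos)
    {
            if (pos < (1ULL << 20))
                    return 64 * 1024;
            if (pos < (16ULL << 20))
                    return 1ULL << 20;
            return 64ULL << 20;
    }

    /* Round a RAM region's end up so oddly sized tails get a locked gap */
    static uint64_t round_ram_end(uint64_t end)
    {
            uint64_t a = ram_alignment_for(end);

            return (end + a - 1) & ~(a - 1);
    }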
diff --cc drivers/cxl/core/region.c
index 5bd1213737fa,bd4c4a4a27da..08fa3deef70a
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@@ -3115,10 -3257,12 +3257,12 @@@ static bool region_is_unaligned_mod3(st
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_root_decoder *cxlrd = cxlr->cxlrd;
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled = NULL;
- u64 dpa_offset, hpa_offset, hpa;
+ u64 base, dpa_offset, hpa_offset, hpa;
+ bool unaligned = false;
u16 eig = 0;
u8 eiw = 0;
int pos;
@@@ -3132,21 -3283,32 +3283,38 @@@
if (!cxled)
return ULLONG_MAX;
- pos = cxled->pos;
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
-
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+ base = cxl_dpa_resource_start(cxled);
+ if (base == RESOURCE_SIZE_MAX)
+ return ULLONG_MAX;
+
+ dpa_offset = dpa - base;
+
+ /* Unaligned calc for MOD3 interleaves not hbiw * 256MB aligned */
+ unaligned = region_is_unaligned_mod3(cxlr);
+ if (unaligned) {
+ hpa = unaligned_dpa_to_hpa(cxld, p, cxled->pos, dpa_offset);
+ if (hpa == ULLONG_MAX)
+ return ULLONG_MAX;
+
+ goto skip_aligned;
+ }
+ /*
+ * Aligned calc for all power-of-2 interleaves and for MOD3
+ * interleaves that are aligned at hbiw * 256MB
+ */
+ pos = cxled->pos;
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
+ if (hpa_offset == ULLONG_MAX)
+ return ULLONG_MAX;
/* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start + p->cache_size;
+ hpa = hpa_offset + p->res->start;
+
+ skip_aligned:
+ hpa += p->cache_size;
/* Root decoder translation overrides typical modulo decode */
if (cxlrd->ops.hpa_to_spa)
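The root-decoder hooks seen in these hunks (hpa_to_spa here, spa_to_hpa
in the next hunk) take a u64 address in and return a u64 address out;
spa_to_hpa visibly returns ULLONG_MAX on failure, and hpa_to_spa
presumably follows the same convention. A tiny standalone model of that
convention (illustrative names only, not the kernel's ops structure):

    #include <stdint.h>

    /* A translation hook returns the mapped address, or UINT64_MAX on failure */
    typedef uint64_t (*addr_xlat_fn)(uint64_t addr);

    static uint64_t xlat_or_identity(addr_xlat_fn hook, uint64_t addr)
    {
            if (!hook)
                    return addr;    /* no platform hook: HPA == SPA */
            return hook(addr);      /* callers bail out on UINT64_MAX */
    }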
@@@ -3177,10 -3379,9 +3388,10 @@@ static int region_offset_to_dpa_result(
struct dpa_result *result)
{
struct cxl_region_params *p = &cxlr->params;
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_root_decoder *cxlrd = cxlr->cxlrd;
struct cxl_endpoint_decoder *cxled;
- u64 hpa, hpa_offset, dpa_offset;
+ u64 hpa_offset = offset;
+ u64 dpa, dpa_offset;
u16 eig = 0;
u8 eiw = 0;
int pos;
@@@ -3197,15 -3398,16 +3408,19 @@@
* CXL HPA is assumed to equal SPA.
*/
if (cxlrd->ops.spa_to_hpa) {
- hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
- hpa_offset = hpa - p->res->start;
- } else {
- hpa_offset = offset;
+ hpa_offset = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
+ if (hpa_offset == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev, "HPA not found for %pr offset %#llx\n",
+ p->res, offset);
+ return -ENXIO;
+ }
+ hpa_offset -= p->res->start;
}
+ if (region_is_unaligned_mod3(cxlr))
+ return unaligned_region_offset_to_dpa_result(cxlr, offset,
+ result);
+
pos = cxl_calculate_position(hpa_offset, eiw, eig);
if (pos < 0 || pos >= p->nr_targets) {
dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",
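For the aligned path, cxl_calculate_position() above recovers the
device position from the HPA offset; together with the granule math it
is the inverse of the DPA->HPA sketch near the top of this page. A
standalone version (illustrative, not the kernel function):

    #include <stdint.h>

    /*
     * Split an aligned HPA offset into (pos, dpa_offset) for an interleave
     * set of 'ways' devices at granularity 'gran' bytes.
     */
    static void hpa_offset_to_dpa(uint64_t hpa_offset, unsigned int ways,
                                  uint64_t gran, unsigned int *pos,
                                  uint64_t *dpa_offset)
    {
            uint64_t line = hpa_offset / gran;      /* granule index in HPA space */

            *pos = line % ways;                     /* device position in the set */
            *dpa_offset = (line / ways) * gran      /* granule index on the device */
                          + hpa_offset % gran;      /* byte within the granule */
    }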