Skip to content

Commit fe51473

Browse files
committed
libnvdimm: fix blk free space accounting
Commit a1f3e4d "libnvdimm, region: update nd_region_available_dpa() for multi-pmem support" reworked blk dpa (DIMM Physical Address) accounting to comprehend multiple pmem namespace allocations aliasing with a given blk-dpa range. The following call trace is a result of failing to account for allocated blk capacity. WARNING: CPU: 1 PID: 2433 at tools/testing/nvdimm/../../../drivers/nvdimm/namespace_devs.c size_store+0x6f3/0x930 [libnvdimm] nd_region region5: allocation underrun: 0x0 of 0x1000000 bytes [..] Call Trace: dump_stack+0x86/0xc3 __warn+0xcb/0xf0 warn_slowpath_fmt+0x5f/0x80 size_store+0x6f3/0x930 [libnvdimm] dev_attr_store+0x18/0x30 If a given blk-dpa allocation does not alias with any pmem ranges then the full allocation should be accounted as busy space, not the size of the current pmem contribution to the region. The thinko that led to this confusion was not realizing that the struct resource management is already guaranteeing no collisions between pmem allocations and blk allocations on the same dimm. Also, we do not try to support blk allocations in aliased pmem holes. This patch also fixes a case where the available blk capacity goes negative. Cc: <[email protected]> Fixes: a1f3e4d ("libnvdimm, region: update nd_region_available_dpa() for multi-pmem support") Reported-by: Dariusz Dokupil <[email protected]> Reported-by: Dave Jiang <[email protected]> Reported-by: Vishal Verma <[email protected]> Tested-by: Dave Jiang <[email protected]> Tested-by: Vishal Verma <[email protected]> Signed-off-by: Dan Williams <[email protected]>
1 parent b03b99a commit fe51473

File tree

1 file changed

+11
-66
lines changed

1 file changed

+11
-66
lines changed

drivers/nvdimm/dimm_devs.c

+11-66
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
395395

396396
int alias_dpa_busy(struct device *dev, void *data)
397397
{
398-
resource_size_t map_end, blk_start, new, busy;
398+
resource_size_t map_end, blk_start, new;
399399
struct blk_alloc_info *info = data;
400400
struct nd_mapping *nd_mapping;
401401
struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
436436
retry:
437437
/*
438438
* Find the free dpa from the end of the last pmem allocation to
439-
* the end of the interleave-set mapping that is not already
440-
* covered by a blk allocation.
439+
* the end of the interleave-set mapping.
441440
*/
442-
busy = 0;
443441
for_each_dpa_resource(ndd, res) {
442+
if (strncmp(res->name, "pmem", 4) != 0)
443+
continue;
444444
if ((res->start >= blk_start && res->start < map_end)
445445
|| (res->end >= blk_start
446446
&& res->end <= map_end)) {
447-
if (strncmp(res->name, "pmem", 4) == 0) {
448-
new = max(blk_start, min(map_end + 1,
449-
res->end + 1));
450-
if (new != blk_start) {
451-
blk_start = new;
452-
goto retry;
453-
}
454-
} else
455-
busy += min(map_end, res->end)
456-
- max(nd_mapping->start, res->start) + 1;
457-
} else if (nd_mapping->start > res->start
458-
&& map_end < res->end) {
459-
/* total eclipse of the PMEM region mapping */
460-
busy += nd_mapping->size;
461-
break;
447+
new = max(blk_start, min(map_end + 1, res->end + 1));
448+
if (new != blk_start) {
449+
blk_start = new;
450+
goto retry;
451+
}
462452
}
463453
}
464454

@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
470460
return 1;
471461
}
472462

473-
info->available -= blk_start - nd_mapping->start + busy;
463+
info->available -= blk_start - nd_mapping->start;
474464

475465
return 0;
476466
}
477467

478-
static int blk_dpa_busy(struct device *dev, void *data)
479-
{
480-
struct blk_alloc_info *info = data;
481-
struct nd_mapping *nd_mapping;
482-
struct nd_region *nd_region;
483-
resource_size_t map_end;
484-
int i;
485-
486-
if (!is_nd_pmem(dev))
487-
return 0;
488-
489-
nd_region = to_nd_region(dev);
490-
for (i = 0; i < nd_region->ndr_mappings; i++) {
491-
nd_mapping = &nd_region->mapping[i];
492-
if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
493-
break;
494-
}
495-
496-
if (i >= nd_region->ndr_mappings)
497-
return 0;
498-
499-
map_end = nd_mapping->start + nd_mapping->size - 1;
500-
if (info->res->start >= nd_mapping->start
501-
&& info->res->start < map_end) {
502-
if (info->res->end <= map_end) {
503-
info->busy = 0;
504-
return 1;
505-
} else {
506-
info->busy -= info->res->end - map_end;
507-
return 0;
508-
}
509-
} else if (info->res->end >= nd_mapping->start
510-
&& info->res->end <= map_end) {
511-
info->busy -= nd_mapping->start - info->res->start;
512-
return 0;
513-
} else {
514-
info->busy -= nd_mapping->size;
515-
return 0;
516-
}
517-
}
518-
519468
/**
520469
* nd_blk_available_dpa - account the unused dpa of BLK region
521470
* @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
545494
for_each_dpa_resource(ndd, res) {
546495
if (strncmp(res->name, "blk", 3) != 0)
547496
continue;
548-
549-
info.res = res;
550-
info.busy = resource_size(res);
551-
device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
552-
info.available -= info.busy;
497+
info.available -= resource_size(res);
553498
}
554499

555500
return info.available;

0 commit comments

Comments
 (0)