vm: region: fix some cases where regions weren't being unlocked after use.

This commit is contained in:
2026-02-26 19:41:40 +00:00
parent 1cdde0d32e
commit eb8758bc5e

View File

@@ -36,6 +36,15 @@
region_find_free_area_linear(region, length) region_find_free_area_linear(region, length)
#endif #endif
/* unlock the immediate parent region of mapping `p`, unless that parent
 * is `root` (the region the caller itself locked and will unlock).
 * arguments are parenthesized and the local carries an unlikely name so
 * the macro stays safe when callers pass expressions or use a variable
 * named `parent` in the argument. */
#define unlock_mapping_parent(p, root) \
	do { \
		struct vm_region *ump_parent_ \
			= region_from_entry((p)->m_entry.e_parent); \
		if (ump_parent_ != (root)) { \
			vm_region_unlock(ump_parent_); \
		} \
	} while (0)
/* iterates over a range of mapped virtual memory in a region, and provides /* iterates over a range of mapped virtual memory in a region, and provides
* a moving buffer through which the memory can be accessed */ * a moving buffer through which the memory can be accessed */
struct vm_iterator { struct vm_iterator {
@@ -296,12 +305,13 @@ static struct vm_region *region_get_child_region_recursive(
* this function should be called with `region` locked. if a mapping is found, * this function should be called with `region` locked. if a mapping is found,
* it will be returned with its immediate parent locked. */ * it will be returned with its immediate parent locked. */
static struct vm_region_mapping *region_get_mapping_recursive( static struct vm_region_mapping *region_get_mapping_recursive(
struct vm_region *region, struct vm_region *root,
off_t *offp, off_t *offp,
size_t len) size_t len)
{ {
off_t offset = *offp; off_t offset = *offp;
region = region_get_child_region_recursive(region, &offset, len); struct vm_region *region
= region_get_child_region_recursive(root, &offset, len);
if (!region) { if (!region) {
return NULL; return NULL;
} }
@@ -311,6 +321,14 @@ static struct vm_region_mapping *region_get_mapping_recursive(
struct vm_region_entry *entry = region_get_entry(region, offset, len); struct vm_region_entry *entry = region_get_entry(region, offset, len);
*offp = offset; *offp = offset;
if (!entry) {
if (region != root) {
vm_region_unlock(region);
}
return NULL;
}
/* return the mapping with the parent region still locked */ /* return the mapping with the parent region still locked */
return mapping_from_entry(entry); return mapping_from_entry(entry);
} }
@@ -593,7 +611,12 @@ static void vm_iterator_begin(
off_t offset = base - vm_region_get_base_address(region); off_t offset = base - vm_region_get_base_address(region);
it->it_mapping = region_get_mapping_recursive(region, &offset, 1); it->it_mapping = region_get_mapping_recursive(region, &offset, 1);
if (!it->it_mapping || (it->it_mapping->m_prot & prot) != prot) { if (!it->it_mapping) {
return;
}
if ((it->it_mapping->m_prot & prot) != prot) {
unlock_mapping_parent(it->it_mapping, region);
return; return;
} }
@@ -612,6 +635,7 @@ static void vm_iterator_begin(
} }
if (!pg) { if (!pg) {
unlock_mapping_parent(it->it_mapping, region);
return; return;
} }
@@ -643,15 +667,6 @@ static void vm_iterator_begin(
static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes) static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
{ {
#define UNLOCK_MAPPING_PARENT(p) \
do { \
struct vm_region *parent \
= region_from_entry(p->m_entry.e_parent); \
if (parent != it->it_region) { \
vm_region_unlock(parent); \
} \
} while (0)
if (nr_bytes < it->it_max) { if (nr_bytes < it->it_max) {
it->it_base += nr_bytes; it->it_base += nr_bytes;
it->it_buf = (char *)it->it_buf + nr_bytes; it->it_buf = (char *)it->it_buf + nr_bytes;
@@ -661,7 +676,7 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
/* the parent region of it->it_mapping is locked here. if it is /* the parent region of it->it_mapping is locked here. if it is
* different from it->it_region, it must be unlocked */ * different from it->it_region, it must be unlocked */
UNLOCK_MAPPING_PARENT(it->it_mapping); unlock_mapping_parent(it->it_mapping, it->it_region);
it->it_base += nr_bytes; it->it_base += nr_bytes;
off_t offset = it->it_base - vm_region_get_base_address(it->it_region); off_t offset = it->it_base - vm_region_get_base_address(it->it_region);
@@ -674,13 +689,13 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
return KERN_MEMORY_FAULT; return KERN_MEMORY_FAULT;
} }
/* past this point, if we encounter an error, must remember to unlock /* past this point, if we encounter an error, must remember to
* the parent region of next_mapping */ * unlock the parent region of next_mapping */
if ((next_mapping->m_prot & it->it_prot) != it->it_prot) { if ((next_mapping->m_prot & it->it_prot) != it->it_prot) {
it->it_buf = NULL; it->it_buf = NULL;
it->it_max = 0; it->it_max = 0;
UNLOCK_MAPPING_PARENT(next_mapping); unlock_mapping_parent(next_mapping, it->it_region);
return KERN_MEMORY_FAULT; return KERN_MEMORY_FAULT;
} }
@@ -699,7 +714,7 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
} }
if (!pg) { if (!pg) {
UNLOCK_MAPPING_PARENT(next_mapping); unlock_mapping_parent(next_mapping, it->it_region);
return KERN_NO_MEMORY; return KERN_NO_MEMORY;
} }
@@ -730,9 +745,20 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
return KERN_OK; return KERN_OK;
} }
/* this function must be called with `root` locked. `root` will be the first /* this function must be called when you are finished with a
* entry visited by the iterator. from there, child entries are visited in * vm_iterator, to ensure that all held locks are released. */
* depth-first order. */ static void vm_iterator_finish(struct vm_iterator *it)
{
if (it->it_mapping) {
unlock_mapping_parent(it->it_mapping, it->it_region);
}
memset(it, 0x0, sizeof *it);
}
/* this function must be called with `root` locked. `root` will be the
* first entry visited by the iterator. from there, child entries are
* visited in depth-first order. */
static void entry_iterator_begin( static void entry_iterator_begin(
struct entry_iterator *it, struct entry_iterator *it,
struct vm_region *root) struct vm_region *root)
@@ -742,8 +768,8 @@ static void entry_iterator_begin(
it->it_entry = &root->vr_entry; it->it_entry = &root->vr_entry;
} }
/* this function must be called when you are finished with an entry_iterator, /* this function must be called when you are finished with an
* to ensure that all held locks are released. */ * entry_iterator, to ensure that all held locks are released. */
static void entry_iterator_finish(struct entry_iterator *it) static void entry_iterator_finish(struct entry_iterator *it)
{ {
struct vm_region_entry *cur = it->it_entry; struct vm_region_entry *cur = it->it_entry;
@@ -771,10 +797,10 @@ static void entry_iterator_finish(struct entry_iterator *it)
/* move to the next entry in the traversal order. /* move to the next entry in the traversal order.
* when this function returns: * when this function returns:
* 1. if the visited entry is a region, it will be locked. * 1. if the visited entry is a region, it will be locked.
* 2. if the visited entry is a mapping, its parent region will be locked. * 2. if the visited entry is a mapping, its parent region will be
* a region will remain locked until all of its children and n-grand-children * locked. a region will remain locked until all of its children and
* have been visited. once iteration is finished, only `it->it_root` will be * n-grand-children have been visited. once iteration is finished, only
* locked. * `it->it_root` will be locked.
*/ */
static void entry_iterator_move_next(struct entry_iterator *it) static void entry_iterator_move_next(struct entry_iterator *it)
{ {
@@ -791,8 +817,9 @@ static void entry_iterator_move_next(struct entry_iterator *it)
if (entry->e_type == VM_REGION_ENTRY_REGION) { if (entry->e_type == VM_REGION_ENTRY_REGION) {
struct vm_region *child_region struct vm_region *child_region
= region_from_entry(entry); = region_from_entry(entry);
/* since `region` is locked, interrupts are already /* since `region` is locked, interrupts are
* disabled, so don't use lock_irq() here */ * already disabled, so don't use lock_irq()
* here */
vm_region_lock(child_region); vm_region_lock(child_region);
} }
@@ -839,8 +866,9 @@ static void entry_iterator_move_next(struct entry_iterator *it)
} }
} }
/* erase the current entry and move to the next entry in the traversal order. /* erase the current entry and move to the next entry in the traversal
* the current entry MUST be a mapping, otherwise nothing will happen. * order. the current entry MUST be a mapping, otherwise nothing will
* happen.
*/ */
static void entry_iterator_erase(struct entry_iterator *it) static void entry_iterator_erase(struct entry_iterator *it)
{ {
@@ -999,7 +1027,8 @@ struct vm_region *vm_region_cast(struct object *obj)
return VM_REGION_CAST(obj); return VM_REGION_CAST(obj);
} }
/* this function should be called with `parent` locked (if parent is non-NULL) /* this function should be called with `parent` locked (if parent is
* non-NULL)
*/ */
kern_status_t vm_region_create( kern_status_t vm_region_create(
struct vm_region *parent, struct vm_region *parent,
@@ -1088,9 +1117,9 @@ kern_status_t vm_region_kill(
= region_from_entry(region->vr_entry.e_parent); = region_from_entry(region->vr_entry.e_parent);
region->vr_entry.e_parent = NULL; region->vr_entry.e_parent = NULL;
/* locks must be acquired in parent->child order. since we're /* locks must be acquired in parent->child order. since
* going backwards here, unlock `region` before locking its * we're going backwards here, unlock `region` before
* parent */ * locking its parent */
vm_region_unlock_irqrestore(region, *lock_flags); vm_region_unlock_irqrestore(region, *lock_flags);
vm_region_lock_irqsave(parent, lock_flags); vm_region_lock_irqsave(parent, lock_flags);
btree_delete(&parent->vr_entries, &region->vr_entry.e_node); btree_delete(&parent->vr_entries, &region->vr_entry.e_node);
@@ -1175,8 +1204,8 @@ kern_status_t vm_region_map_object(
root, root,
&region_offset, &region_offset,
length); length);
/* if `region` != `root`, it will need to be unlocked at the end /* if `region` != `root`, it will need to be unlocked at
* of the function */ * the end of the function */
} }
if (region->vr_status != VM_REGION_ONLINE) { if (region->vr_status != VM_REGION_ONLINE) {
@@ -1257,8 +1286,8 @@ kern_status_t vm_region_map_object(
return KERN_OK; return KERN_OK;
} }
/* unmap some pages in the middle of a mapping, splitting it into two separate /* unmap some pages in the middle of a mapping, splitting it into two
* mappings */ * separate mappings */
static kern_status_t split_mapping( static kern_status_t split_mapping(
struct vm_region_mapping *mapping, struct vm_region_mapping *mapping,
struct vm_region *root, struct vm_region *root,
@@ -1488,7 +1517,8 @@ kern_status_t vm_region_unmap(
unmap_area_offset, unmap_area_offset,
unmap_area_limit); unmap_area_limit);
} else { } else {
panic("don't know what to do with this mapping"); panic("don't know what to do with this "
"mapping");
} }
if (delete) { if (delete) {
@@ -1557,15 +1587,18 @@ bool vm_region_validate_access(
return false; return false;
} }
if ((mapping->m_prot & prot) != prot) { bool ok = (mapping->m_prot & prot) == prot;
return false;
}
struct vm_region *parent struct vm_region *parent
= region_from_entry(mapping->m_entry.e_parent); = region_from_entry(mapping->m_entry.e_parent);
if (parent != region) { if (parent != region) {
vm_region_unlock(parent); vm_region_unlock(parent);
} }
if (!ok) {
return false;
}
} }
return true; return true;
@@ -1671,6 +1704,8 @@ kern_status_t vm_region_read_kernel(
dest += to_move; dest += to_move;
} }
vm_iterator_finish(&src);
if (nr_read) { if (nr_read) {
*nr_read = r; *nr_read = r;
} }
@@ -1727,6 +1762,9 @@ kern_status_t vm_region_memmove(
r += to_move; r += to_move;
} }
vm_iterator_finish(&src);
vm_iterator_finish(&dest);
if (nr_moved) { if (nr_moved) {
*nr_moved = r; *nr_moved = r;
} }