From 4237b6ca20346c642d4bc0c88aab0ffb54a966ab Mon Sep 17 00:00:00 2001 From: Max Wash Date: Thu, 2 Feb 2023 21:14:02 +0000 Subject: [PATCH] sandbox: vm: add synchronisation using spinlocks --- sandbox/vm/cache.c | 7 ++++++- sandbox/vm/include/socks/vm.h | 4 ++++ sandbox/vm/zone.c | 19 ++++++++++++++++++- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/sandbox/vm/cache.c b/sandbox/vm/cache.c index a303952..38eb24d 100644 --- a/sandbox/vm/cache.c +++ b/sandbox/vm/cache.c @@ -140,6 +140,9 @@ static unsigned int pointer_to_slot(vm_slab_t *slab, void *p) void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags) { + unsigned long irq_flags; + spin_lock_irqsave(&cache->c_lock, &irq_flags); + vm_slab_t *slab = NULL; if (!queue_empty(&cache->c_slabs_partial)) { /* prefer using up partially-full slabs before taking a fresh one */ @@ -156,6 +159,7 @@ void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags) } if (!slab) { + spin_unlock_irqrestore(&cache->c_lock, irq_flags); return NULL; } @@ -167,7 +171,8 @@ void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags) } else { queue_push_back(&cache->c_slabs_partial, &slab->s_list); } - + + spin_unlock_irqrestore(&cache->c_lock, irq_flags); return p; } diff --git a/sandbox/vm/include/socks/vm.h b/sandbox/vm/include/socks/vm.h index 05628ac..4770fc6 100644 --- a/sandbox/vm/include/socks/vm.h +++ b/sandbox/vm/include/socks/vm.h @@ -5,6 +5,7 @@ #include #include #include +#include <socks/spinlock.h> /* maximum number of NUMA nodes */ #define VM_MAX_NODES 64 @@ -106,6 +107,7 @@ typedef struct vm_zone_descriptor { typedef struct vm_zone { vm_zone_descriptor_t z_info; + spin_lock_t z_lock; queue_t z_free_pages[VM_MAX_PAGE_ORDERS]; unsigned long z_size; @@ -130,6 +132,8 @@ typedef struct vm_cache { queue_t c_slabs_partial; queue_t c_slabs_empty; + spin_lock_t c_lock; + /* number of objects that can be stored in a single slab */ unsigned int c_obj_count; /* the size of object kept in the cache */ diff --git a/sandbox/vm/zone.c
b/sandbox/vm/zone.c index b2a0e54..80b5132 100644 --- a/sandbox/vm/zone.c +++ b/sandbox/vm/zone.c @@ -1,4 +1,5 @@ -#include "socks/queue.h" +#include <socks/queue.h> +#include <socks/spinlock.h> #include #include #include @@ -96,6 +97,10 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info) zone_info->zd_name, zone_info->zd_base, zone_info->zd_limit); memset(z, 0x0, sizeof *z); memcpy(&z->z_info, zone_info, sizeof *zone_info); + z->z_lock = SPIN_LOCK_INIT; + + unsigned long flags; + spin_lock_irqsave(&z->z_lock, &flags); phys_addr_t block_start = zone_info->zd_base, block_end = zone_info->zd_limit; int this_page_reserved = 0, last_page_reserved = -1; @@ -126,6 +131,8 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info) if (block_start != block_end) { convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, this_page_reserved); } + + spin_unlock_irqrestore(&z->z_lock, flags); } static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order) @@ -179,8 +186,12 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order) vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags) { + unsigned long irq_flags; + spin_lock_irqsave(&z->z_lock, &irq_flags); + int result = replenish_free_page_list(z, order); if (result != 0) { + spin_unlock_irqrestore(&z->z_lock, irq_flags); return NULL; } @@ -190,11 +201,15 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t fl i->p_flags |= VM_PAGE_ALLOC; } + spin_unlock_irqrestore(&z->z_lock, irq_flags); return pg; } void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg) { + unsigned long irq_flags; + spin_lock_irqsave(&z->z_lock, &irq_flags); + pg->p_flags &= ~VM_PAGE_ALLOC; queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list); @@ -211,4 +226,6 @@ void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg) pg = huge; } + + spin_unlock_irqrestore(&z->z_lock, irq_flags); }