diff --git a/module/os/windows/spl/spl-kmem.c b/module/os/windows/spl/spl-kmem.c index 7f3dfb05f836..5524be1b59d0 100644 --- a/module/os/windows/spl/spl-kmem.c +++ b/module/os/windows/spl/spl-kmem.c @@ -71,6 +71,7 @@ const unsigned int spl_vm_page_free_min = 3500; static kcondvar_t spl_free_thread_cv; static kmutex_t spl_free_thread_lock; static boolean_t spl_free_thread_exit; +volatile boolean_t spl_free_thread_running = FALSE; static volatile _Atomic int64_t spl_free = 0; static boolean_t spl_event_thread_exit = FALSE; @@ -5725,6 +5726,7 @@ spl_kmem_thread_init(void) spl_free_thread_exit = FALSE; (void) cv_init(&spl_free_thread_cv, NULL, CV_DEFAULT, NULL); (void) thread_create(NULL, 0, spl_free_thread, 0, 0, 0, 0, 92); + spl_free_thread_running = TRUE; spl_event_thread_exit = FALSE; (void) thread_create(NULL, 0, spl_event_thread, 0, 0, 0, 0, 92); @@ -5751,6 +5753,7 @@ spl_kmem_thread_fini(void) cv_signal(&spl_free_thread_cv); cv_wait(&spl_free_thread_cv, &spl_free_thread_lock); } + spl_free_thread_running = FALSE; mutex_exit(&spl_free_thread_lock); cv_destroy(&spl_free_thread_cv); mutex_destroy(&spl_free_thread_lock); diff --git a/module/os/windows/spl/spl-vmem.c b/module/os/windows/spl/spl-vmem.c index eb7262ae2574..d43d354a4fc9 100644 --- a/module/os/windows/spl/spl-vmem.c +++ b/module/os/windows/spl/spl-vmem.c @@ -1630,35 +1630,51 @@ vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase, mutex_enter(&vmp->vm_lock); if (vmflag & VM_NOSLEEP) break; - atomic_inc_64(&vmp->vm_kstat.vk_wait.value.ui64); - atomic_inc_64(&vmp->vm_kstat.vk_threads_waiting.value.ui64); - atomic_inc_64(&spl_vmem_threads_waiting); - if (spl_vmem_threads_waiting > 0) { - dprintf("SPL: %s: vmem waiting for %lu sized alloc " - "for %s, waiting threads %llu, total threads " - "waiting = %llu\n", - __func__, size, vmp->vm_name, - vmp->vm_kstat.vk_threads_waiting.value.ui64, - spl_vmem_threads_waiting); - extern int64_t spl_free_set_and_wait_pressure(int64_t, - boolean_t, 
clock_t); - extern int64_t spl_free_manual_pressure_wrapper(void); - mutex_exit(&vmp->vm_lock); - // release other waiting threads - spl_free_set_pressure(0); - int64_t target_pressure = size * - spl_vmem_threads_waiting; - int64_t delivered_pressure = - spl_free_set_and_wait_pressure(target_pressure, - TRUE, USEC2NSEC(500)); - dprintf("SPL: %s: pressure %lld targeted, %lld " - "delivered\n", __func__, target_pressure, - delivered_pressure); - mutex_enter(&vmp->vm_lock); + + extern volatile boolean_t spl_free_thread_running; + + if (spl_free_thread_running) { + atomic_inc_64(&vmp->vm_kstat.vk_wait.value.ui64); + atomic_inc_64( + &vmp->vm_kstat.vk_threads_waiting.value.ui64); + atomic_inc_64(&spl_vmem_threads_waiting); + if (spl_vmem_threads_waiting > 0) { + dprintf("SPL: %s: vmem waiting for %lu sized " + "alloc for %s, waiting threads %llu, total " + "threads waiting = %llu\n", + __func__, size, vmp->vm_name, + vmp->vm_kstat.vk_threads_waiting.value.ui64, + spl_vmem_threads_waiting); + extern int64_t spl_free_set_and_wait_pressure( + int64_t, boolean_t, clock_t); + extern int64_t spl_free_manual_pressure_wrapper( + void); + mutex_exit(&vmp->vm_lock); + // release other waiting threads + spl_free_set_pressure(0); + int64_t target_pressure = size * + spl_vmem_threads_waiting; + int64_t delivered_pressure = + spl_free_set_and_wait_pressure( + target_pressure, TRUE, + USEC2NSEC(500)); + dprintf("SPL: %s: pressure %lld targeted, %lld " + "delivered\n", __func__, target_pressure, + delivered_pressure); + mutex_enter(&vmp->vm_lock); + } + cv_wait(&vmp->vm_cv, &vmp->vm_lock); + atomic_dec_64(&spl_vmem_threads_waiting); + atomic_dec_64( + &vmp->vm_kstat.vk_threads_waiting.value.ui64); + } + else + { + DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_INFO_LEVEL, + "SPL: %s: Not able to set pressure %llu as free " + "thread is not running\n", __func__, size); + delay(hz); } - cv_wait(&vmp->vm_cv, &vmp->vm_lock); - atomic_dec_64(&spl_vmem_threads_waiting); - 
atomic_dec_64(&vmp->vm_kstat.vk_threads_waiting.value.ui64); } if (vbest != NULL) { ASSERT(vbest->vs_type == VMEM_FREE);