block.c  +1 −0

@@ -320,6 +320,7 @@ BlockDriverState *bdrv_new(void)
         QLIST_INIT(&bs->op_blockers[i]);
     }
     notifier_with_return_list_init(&bs->before_write_notifiers);
+    qemu_co_mutex_init(&bs->reqs_lock);
     bs->refcnt = 1;
     bs->aio_context = qemu_get_aio_context();
block/io.c  +14 −2

@@ -378,8 +378,10 @@ static void tracked_request_end(BdrvTrackedRequest *req)
         atomic_dec(&req->bs->serialising_in_flight);
     }
+    qemu_co_mutex_lock(&req->bs->reqs_lock);
     QLIST_REMOVE(req, list);
     qemu_co_queue_restart_all(&req->wait_queue);
+    qemu_co_mutex_unlock(&req->bs->reqs_lock);
 }

@@ -404,7 +406,9 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
     qemu_co_queue_init(&req->wait_queue);
+    qemu_co_mutex_lock(&bs->reqs_lock);
     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
+    qemu_co_mutex_unlock(&bs->reqs_lock);
 }

 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)

@@ -526,6 +530,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
     do {
         retry = false;
+        qemu_co_mutex_lock(&bs->reqs_lock);
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
             if (req == self || (!req->serialising && !self->serialising)) {
                 continue;

@@ -544,7 +549,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
                  * (instead of producing a deadlock in the former case). */
                 if (!req->waiting_for) {
                     self->waiting_for = req;
-                    qemu_co_queue_wait(&req->wait_queue, NULL);
+                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                     self->waiting_for = NULL;
                     retry = true;
                     waited = true;

@@ -552,6 +557,7 @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
                 }
             }
         }
+        qemu_co_mutex_unlock(&bs->reqs_lock);
     } while (retry);

     return waited;

@@ -2291,14 +2297,17 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
         goto early_exit;
     }
+    qemu_co_mutex_lock(&bs->reqs_lock);
     current_gen = atomic_read(&bs->write_gen);

     /* Wait until any previous flushes are completed */
     while (bs->active_flush_req) {
-        qemu_co_queue_wait(&bs->flush_queue, NULL);
+        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
     }

+    /* Flushes reach this point in nondecreasing current_gen order. */
     bs->active_flush_req = true;
+    qemu_co_mutex_unlock(&bs->reqs_lock);

     /* Write back all layers by calling one driver function */
     if (bs->drv->bdrv_co_flush) {

@@ -2370,9 +2379,12 @@ out:
     if (ret == 0) {
         bs->flushed_gen = current_gen;
     }
+    qemu_co_mutex_lock(&bs->reqs_lock);
     bs->active_flush_req = false;
     /* Return value is ignored - it's ok if wait queue is empty */
     qemu_co_queue_next(&bs->flush_queue);
+    qemu_co_mutex_unlock(&bs->reqs_lock);

 early_exit:
     bdrv_dec_in_flight(bs);
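The pattern in bdrv_co_flush is a condition-variable style handoff: qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock) releases reqs_lock while the coroutine sleeps and re-takes it on wakeup, so active_flush_req only ever changes under the lock. As a rough, standalone illustration of that flush-serialization discipline, here is a hypothetical sketch using POSIX threads in place of QEMU coroutines; flush_state, do_flush and the file name flush_demo.c are invented for the example and do not exist in QEMU.

/* Hypothetical analogy only: pthread primitives stand in for QEMU's
 * coroutine ones (reqs_lock ~ pthread_mutex_t, flush_queue ~ pthread_cond_t).
 * build: cc -pthread flush_demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static struct {
    pthread_mutex_t lock;          /* plays the role of bs->reqs_lock */
    pthread_cond_t flush_queue;    /* plays the role of bs->flush_queue */
    bool active_flush_req;         /* flush request in flight? */
    unsigned int write_gen;        /* current data generation */
    unsigned int flushed_gen;      /* flushed write generation */
} flush_state = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .flush_queue = PTHREAD_COND_INITIALIZER,
};

static void *do_flush(void *name)
{
    unsigned int current_gen;

    pthread_mutex_lock(&flush_state.lock);
    current_gen = flush_state.write_gen;

    /* Wait until any previous flush has completed; the wait drops the lock
     * while sleeping, like qemu_co_queue_wait(..., &bs->reqs_lock). */
    while (flush_state.active_flush_req) {
        pthread_cond_wait(&flush_state.flush_queue, &flush_state.lock);
    }
    flush_state.active_flush_req = true;
    pthread_mutex_unlock(&flush_state.lock);

    /* The expensive flush itself runs outside the lock. */
    printf("%s: flushing generation %u\n", (const char *)name, current_gen);
    usleep(1000);

    /* flushed_gen is only touched by whoever set active_flush_req to true,
     * so it needs no lock (mirrors the comment added in block_int.h). */
    flush_state.flushed_gen = current_gen;

    pthread_mutex_lock(&flush_state.lock);
    flush_state.active_flush_req = false;
    pthread_cond_signal(&flush_state.flush_queue);   /* wake the next waiter */
    pthread_mutex_unlock(&flush_state.lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    flush_state.write_gen = 1;
    pthread_create(&a, NULL, do_flush, "flush-A");
    pthread_create(&b, NULL, do_flush, "flush-B");
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}

Unlike QEMU's FIFO CoQueue, pthread_cond_signal makes no ordering promise between waiters, so this sketch serializes flushes but does not preserve the nondecreasing current_gen order noted in the patch.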
include/block/block_int.h  +9 −5

@@ -609,11 +609,6 @@ struct BlockDriverState {
     uint64_t write_threshold_offset;
     NotifierWithReturn write_threshold_notifier;
-    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
-    CoQueue flush_queue;            /* Serializing flush queue */
-    bool active_flush_req;          /* Flush request in flight? */
-    unsigned int flushed_gen;       /* Flushed write generation */
     QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;

     /* Offset after the highest byte written to */

@@ -647,6 +642,15 @@ struct BlockDriverState {
     /* Accessed with atomic ops. */
     int quiesce_counter;
     unsigned int write_gen;               /* Current data generation */
+
+    /* Protected by reqs_lock. */
+    CoMutex reqs_lock;
+    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
+    CoQueue flush_queue;            /* Serializing flush queue */
+    bool active_flush_req;          /* Flush request in flight? */
+
+    /* Only read/written by whoever has set active_flush_req to true. */
+    unsigned int flushed_gen;       /* Flushed write generation */
 };

 struct BlockBackendRootState {
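The relocated fields now form one consistency domain: tracked_requests, flush_queue and active_flush_req may only be touched with reqs_lock held, while flushed_gen is covered by the active_flush_req ownership rule stated in its comment. Below is a hypothetical, standalone sketch of the same discipline for the tracked-request list, again using POSIX threads and the <sys/queue.h> LIST macros in place of coroutines and QEMU's QLIST; TrackedReq, tracked_req_begin/tracked_req_end and wait_serialising are invented names that merely mirror the functions patched in block/io.c above.

/* Hypothetical analogy only, not QEMU code.  build: cc -pthread tracked_demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>          /* LIST_* macros, analogous to QEMU's QLIST_* */

typedef struct TrackedReq {
    bool serialising;
    pthread_cond_t wait_queue;             /* ~ req->wait_queue (CoQueue) */
    LIST_ENTRY(TrackedReq) list;
} TrackedReq;

static struct {
    pthread_mutex_t reqs_lock;             /* ~ bs->reqs_lock (CoMutex) */
    LIST_HEAD(, TrackedReq) tracked_requests;
} state = { .reqs_lock = PTHREAD_MUTEX_INITIALIZER };

/* List insertion only happens with reqs_lock held. */
static void tracked_req_begin(TrackedReq *req)
{
    pthread_cond_init(&req->wait_queue, NULL);
    pthread_mutex_lock(&state.reqs_lock);
    LIST_INSERT_HEAD(&state.tracked_requests, req, list);
    pthread_mutex_unlock(&state.reqs_lock);
}

/* Removal and waking waiters happen under the same lock, like
 * QLIST_REMOVE + qemu_co_queue_restart_all in tracked_request_end. */
static void tracked_req_end(TrackedReq *req)
{
    pthread_mutex_lock(&state.reqs_lock);
    LIST_REMOVE(req, list);
    pthread_cond_broadcast(&req->wait_queue);
    pthread_mutex_unlock(&state.reqs_lock);
}

/* Block until no conflicting request remains in flight; the cond wait
 * releases reqs_lock while sleeping and re-takes it on wakeup. */
static void wait_serialising(TrackedReq *self)
{
    TrackedReq *req;
    bool retry;

    do {
        retry = false;
        pthread_mutex_lock(&state.reqs_lock);
        LIST_FOREACH(req, &state.tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            pthread_cond_wait(&req->wait_queue, &state.reqs_lock);
            retry = true;
            break;
        }
        pthread_mutex_unlock(&state.reqs_lock);
    } while (retry);
}

int main(void)
{
    TrackedReq req = { .serialising = true };

    tracked_req_begin(&req);
    wait_serialising(&req);    /* no other requests, so returns immediately */
    tracked_req_end(&req);
    return 0;
}

The point mirrored from the patch is that the list is never walked or modified without the lock, and a waiter always re-scans the list after waking because the lock was released while it slept.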