qed: protect table cache with CoMutex
This makes the driver thread-safe.  The CoMutex is dropped temporarily
while accessing the data clusters or the backing file.

Signed-off-by: Paolo Bonzini <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Fam Zheng <[email protected]>
Signed-off-by: Fam Zheng <[email protected]>
bonzini authored and Fam Zheng committed Jul 17, 2017
1 parent 61c7887 commit 1f01e50
Showing 5 changed files with 129 additions and 54 deletions.
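
The locking pattern the diff applies is simple: table_lock is held whenever the L1/L2 table cache is examined or updated, and is dropped only around the blocking bdrv_preadv()/bdrv_pwritev() of table data, then reacquired before the cache is touched again. Below is a minimal stand-alone sketch of that shape. It is illustration only, not QEMU code: a pthread mutex and pread() stand in for QEMU's CoMutex and bdrv_preadv(), and the identifiers (cache_lock, table_buf, read_table_locked) are invented for the sketch.

/* Illustration only -- not QEMU code.  Same "drop the lock around blocking
 * I/O, reacquire before touching shared state" shape as the diff below,
 * with pthread_mutex_t and pread() standing in for CoMutex and bdrv_preadv().
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t table_buf[512];            /* stand-in for the cached table */

/* Caller holds cache_lock on entry; it is held again on return. */
static int read_table_locked(int fd, off_t offset)
{
    uint64_t tmp[512];
    ssize_t n;

    pthread_mutex_unlock(&cache_lock);     /* drop the lock around slow I/O */
    n = pread(fd, tmp, sizeof(tmp), offset);
    pthread_mutex_lock(&cache_lock);       /* reacquire before using the cache */

    if (n != (ssize_t)sizeof(tmp)) {
        return -1;                         /* can't trust a partial read */
    }
    memcpy(table_buf, tmp, sizeof(tmp));   /* touch shared state only under the lock */
    return 0;
}

int main(void)
{
    int fd = open("/dev/zero", O_RDONLY);  /* any seekable descriptor works */
    int ret;

    if (fd < 0) {
        return 1;
    }
    pthread_mutex_lock(&cache_lock);
    ret = read_table_locked(fd, 0);
    pthread_mutex_unlock(&cache_lock);
    close(fd);
    printf("read_table_locked returned %d\n", ret);
    return 0;
}

In the driver itself the unlock/lock pair is additionally guarded by qemu_in_coroutine(), matching the new comments: the same table helpers are also reached from qed_check, where table_lock is not held.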
4 changes: 2 additions & 2 deletions block/qed-cluster.c
@@ -85,6 +85,8 @@ static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
  *
  * On failure QED_CLUSTER_L2 or QED_CLUSTER_L1 is returned for missing L2 or L1
  * table offset, respectively. len is number of contiguous unallocated bytes.
+ *
+ * Called with table_lock held.
  */
 int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
                                   uint64_t pos, size_t *len,
@@ -112,7 +114,6 @@ int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
     }
 
     ret = qed_read_l2_table(s, request, l2_offset);
-    qed_acquire(s);
     if (ret) {
         goto out;
     }
@@ -137,6 +138,5 @@ int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
 
 out:
     *img_offset = offset;
-    qed_release(s);
     return ret;
 }
6 changes: 6 additions & 0 deletions block/qed-l2-cache.c
@@ -101,6 +101,8 @@ CachedL2Table *qed_alloc_l2_cache_entry(L2TableCache *l2_cache)
 /**
  * Decrease an entry's reference count and free if necessary when the reference
  * count drops to zero.
+ *
+ * Called with table_lock held.
  */
 void qed_unref_l2_cache_entry(CachedL2Table *entry)
 {
@@ -122,6 +124,8 @@ void qed_unref_l2_cache_entry(CachedL2Table *entry)
  *
  * For a cached entry, this function increases the reference count and returns
  * the entry.
+ *
+ * Called with table_lock held.
  */
 CachedL2Table *qed_find_l2_cache_entry(L2TableCache *l2_cache, uint64_t offset)
 {
@@ -150,6 +154,8 @@ CachedL2Table *qed_find_l2_cache_entry(L2TableCache *l2_cache, uint64_t offset)
  * N.B. This function steals a reference to the l2_table from the caller so the
  * caller must obtain a new reference by issuing a call to
  * qed_find_l2_cache_entry().
+ *
+ * Called with table_lock held.
  */
 void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table)
 {
24 changes: 18 additions & 6 deletions block/qed-table.c
@@ -18,6 +18,7 @@
 #include "qed.h"
 #include "qemu/bswap.h"
 
+/* Called either from qed_check or with table_lock held. */
 static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
 {
     QEMUIOVector qiov;
@@ -32,18 +33,22 @@ static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
 
     trace_qed_read_table(s, offset, table);
 
+    if (qemu_in_coroutine()) {
+        qemu_co_mutex_unlock(&s->table_lock);
+    }
     ret = bdrv_preadv(s->bs->file, offset, &qiov);
+    if (qemu_in_coroutine()) {
+        qemu_co_mutex_lock(&s->table_lock);
+    }
     if (ret < 0) {
         goto out;
     }
 
     /* Byteswap offsets */
-    qed_acquire(s);
     noffsets = qiov.size / sizeof(uint64_t);
     for (i = 0; i < noffsets; i++) {
         table->offsets[i] = le64_to_cpu(table->offsets[i]);
     }
-    qed_release(s);
 
     ret = 0;
 out:
@@ -61,6 +66,8 @@ static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
  * @index: Index of first element
  * @n: Number of elements
  * @flush: Whether or not to sync to disk
+ *
+ * Called either from qed_check or with table_lock held.
  */
 static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
                            unsigned int index, unsigned int n, bool flush)
@@ -97,16 +104,20 @@ static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
     /* Adjust for offset into table */
     offset += start * sizeof(uint64_t);
 
+    if (qemu_in_coroutine()) {
+        qemu_co_mutex_unlock(&s->table_lock);
+    }
     ret = bdrv_pwritev(s->bs->file, offset, &qiov);
+    if (qemu_in_coroutine()) {
+        qemu_co_mutex_lock(&s->table_lock);
+    }
     trace_qed_write_table_cb(s, table, flush, ret);
     if (ret < 0) {
         goto out;
     }
 
     if (flush) {
-        qed_acquire(s);
         ret = bdrv_flush(s->bs);
-        qed_release(s);
         if (ret < 0) {
             goto out;
         }
@@ -123,6 +134,7 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
     return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
 }
 
+/* Called either from qed_check or with table_lock held. */
 int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
 {
     BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
@@ -136,6 +148,7 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
     return qed_write_l1_table(s, index, n);
 }
 
+/* Called either from qed_check or with table_lock held. */
 int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
 {
     int ret;
@@ -154,7 +167,6 @@ int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
     BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
     ret = qed_read_table(s, offset, request->l2_table->table);
 
-    qed_acquire(s);
     if (ret) {
         /* can't trust loaded L2 table anymore */
         qed_unref_l2_cache_entry(request->l2_table);
@@ -170,7 +182,6 @@ int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
         request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
         assert(request->l2_table != NULL);
     }
-    qed_release(s);
 
     return ret;
 }
@@ -180,6 +191,7 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
     return qed_read_l2_table(s, request, offset);
 }
 
+/* Called either from qed_check or with table_lock held. */
 int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
                        unsigned int index, unsigned int n, bool flush)
 {