Commit 98b2e3c9 authored by Peter Maydell's avatar Peter Maydell

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

This pull request also contains the two commits from the previous pull request
that was dropped due to a mingw compilation error.  The compilation should now
be fixed.

# gpg: Signature made Tue 08 Oct 2019 15:54:26 BST
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  iotests/262: Switch source/dest VM launch order
  block: Skip COR for inactive nodes
  virtio-blk: schedule virtio_notify_config to run on main context
  util/iov.c: try to reassure Coverity about qemu_iovec_init_extended
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 14d40ab1 4d804b53
Pipeline #21055 failed with stage
in 0 seconds
......@@ -1246,11 +1246,18 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
BDRV_REQUEST_MAX_BYTES);
unsigned int progress = 0;
bool skip_write;
if (!drv) {
return -ENOMEDIUM;
}
/*
* Do not write anything when the BDS is inactive. That is not
* allowed, and it would not help.
*/
skip_write = (bs->open_flags & BDRV_O_INACTIVE);
/* FIXME We cannot require callers to have write permissions when all they
* are doing is a read request. If we did things right, write permissions
* would be obtained anyway, but internally by the copy-on-read code. As
......@@ -1274,23 +1281,29 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
while (cluster_bytes) {
int64_t pnum;
ret = bdrv_is_allocated(bs, cluster_offset,
MIN(cluster_bytes, max_transfer), &pnum);
if (ret < 0) {
/* Safe to treat errors in querying allocation as if
* unallocated; we'll probably fail again soon on the
* read, but at least that will set a decent errno.
*/
if (skip_write) {
ret = 1; /* "already allocated", so nothing will be copied */
pnum = MIN(cluster_bytes, max_transfer);
}
} else {
ret = bdrv_is_allocated(bs, cluster_offset,
MIN(cluster_bytes, max_transfer), &pnum);
if (ret < 0) {
/*
* Safe to treat errors in querying allocation as if
* unallocated; we'll probably fail again soon on the
* read, but at least that will set a decent errno.
*/
pnum = MIN(cluster_bytes, max_transfer);
}
/* Stop at EOF if the image ends in the middle of the cluster */
if (ret == 0 && pnum == 0) {
assert(progress >= bytes);
break;
}
/* Stop at EOF if the image ends in the middle of the cluster */
if (ret == 0 && pnum == 0) {
assert(progress >= bytes);
break;
}
assert(skip_bytes < pnum);
assert(skip_bytes < pnum);
}
if (ret <= 0) {
QEMUIOVector local_qiov;
......
......@@ -16,6 +16,7 @@
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
......@@ -1086,11 +1087,24 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0;
}
/*
 * BH callback: emit the virtio config-change notification.
 *
 * Scheduled from virtio_blk_resize() via aio_bh_schedule_oneshot() so the
 * notification runs in the main loop, where the global mutex may be taken.
 *
 * @opaque: the VirtIODevice whose configuration changed.
 */
static void virtio_resize_cb(void *opaque)
{
    /* Must only ever run in the main AioContext, never in an iothread. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    virtio_notify_config(opaque);
}
/*
 * BlockDevOps resize callback for virtio-blk.
 *
 * May be invoked from an iothread AioContext, where calling
 * virtio_notify_config() directly is not allowed (it needs to acquire the
 * global mutex).  Do not call it here; only schedule the notification.
 *
 * @opaque: the device, convertible to VirtIODevice via VIRTIO_DEVICE().
 */
static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the global mutex,
     * so it can't be called from an iothread. Instead, schedule
     * it to be run in the main context BH.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}
static const BlockDevOps virtio_block_ops = {
......
......@@ -54,12 +54,6 @@ with iotests.FilePath('img') as img_path, \
os.mkfifo(fifo)
iotests.log('Launching source VM...')
add_opts(vm_a)
vm_a.launch()
vm_a.enable_migration_events('A')
iotests.log('Launching destination VM...')
add_opts(vm_b)
vm_b.add_incoming("exec: cat '%s'" % (fifo))
......@@ -67,6 +61,12 @@ with iotests.FilePath('img') as img_path, \
vm_b.enable_migration_events('B')
iotests.log('Launching source VM...')
add_opts(vm_a)
vm_a.launch()
vm_a.enable_migration_events('A')
iotests.log('Starting migration to B...')
iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo)))
with iotests.Timeout(3, 'Migration does not complete'):
......
Launching source VM...
Enabling migration QMP events on A...
{"return": {}}
Launching destination VM...
Enabling migration QMP events on B...
{"return": {}}
Launching source VM...
Enabling migration QMP events on A...
{"return": {}}
Starting migration to B...
{"return": {}}
{"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
......
......@@ -423,7 +423,7 @@ void qemu_iovec_init_extended(
{
size_t mid_head, mid_tail;
int total_niov, mid_niov = 0;
struct iovec *p, *mid_iov;
struct iovec *p, *mid_iov = NULL;
if (mid_len) {
mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len,
......@@ -446,7 +446,8 @@ void qemu_iovec_init_extended(
p++;
}
if (mid_len) {
assert(!mid_niov == !mid_len);
if (mid_niov) {
memcpy(p, mid_iov, mid_niov * sizeof(*p));
p[0].iov_base = (uint8_t *)p[0].iov_base + mid_head;
p[0].iov_len -= mid_head;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment