net: virtio-net and vmxnet3 use offloading API

With this patch, the virtio-net and vmxnet3 frontends use the
qemu_peer_* API to manipulate backend offloads, instead of calling
TAP-specific functions directly.

We also remove the existing checks that prevented these frontends
from using offloads with backends other than TAP (e.g. netmap).

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Author:    Vincenzo Maffione, 2014-02-06 17:02:18 +01:00
Committer: Stefan Hajnoczi
Commit:    cf528b8958
Parent:    2e753bcc7d
3 changed files with 13 additions and 19 deletions
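
For context, the qemu_peer_* calls used below are thin wrappers that
dispatch an offload request through the peer's NetClientInfo hooks
rather than assuming a TAP backend; the vhost_net.c hunk applies the
same dispatch pattern directly via backend->info->has_vnet_hdr(backend).
A minimal sketch of one such wrapper, assuming the has_vnet_hdr hook
added by the parent commit (the actual helpers live elsewhere in the
net layer and are not part of this diff):

    /* Dispatch the vnet-header capability query to whatever backend is
     * peered with this NetClientState, instead of calling the
     * TAP-specific tap_has_vnet_hdr() directly. */
    bool qemu_peer_has_vnet_hdr(NetClientState *nc)
    {
        if (!nc->peer || !nc->peer->info->has_vnet_hdr) {
            return false;   /* backend does not implement the hook */
        }
        return nc->peer->info->has_vnet_hdr(nc->peer);
    }

A backend such as netmap can then take part in offload negotiation
simply by filling in the corresponding NetClientInfo function pointers.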

hw/net/vhost_net.c

@@ -106,7 +106,7 @@ struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
         goto fail;
     }
     net->nc = backend;
-    net->dev.backend_features = tap_has_vnet_hdr(backend) ? 0 :
+    net->dev.backend_features = backend->info->has_vnet_hdr(backend) ? 0 :
         (1 << VHOST_NET_F_VIRTIO_NET_HDR);
     net->backend = r;
@@ -117,7 +117,7 @@ struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
     if (r < 0) {
         goto fail;
     }
-    if (!tap_has_vnet_hdr_len(backend,
+    if (!backend->info->has_vnet_hdr_len(backend,
                               sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
         net->dev.features &= ~(1 << VIRTIO_NET_F_MRG_RXBUF);
     }

hw/net/virtio-net.c

@@ -325,11 +325,7 @@ static void peer_test_vnet_hdr(VirtIONet *n)
         return;
     }
 
-    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
-        return;
-    }
-
-    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
+    n->has_vnet_hdr = qemu_peer_has_vnet_hdr(nc);
 }
 
 static int peer_has_vnet_hdr(VirtIONet *n)
@@ -342,7 +338,7 @@ static int peer_has_ufo(VirtIONet *n)
     if (!peer_has_vnet_hdr(n))
         return 0;
 
-    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);
+    n->has_ufo = qemu_peer_has_ufo(qemu_get_queue(n->nic));
 
     return n->has_ufo;
 }
@@ -361,8 +357,8 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
         nc = qemu_get_subqueue(n->nic, i);
 
         if (peer_has_vnet_hdr(n) &&
-            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
-            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
+            qemu_peer_has_vnet_hdr_len(nc, n->guest_hdr_len)) {
+            qemu_peer_set_vnet_hdr_len(nc, n->guest_hdr_len);
             n->host_hdr_len = n->guest_hdr_len;
         }
     }
@@ -463,7 +459,7 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
 
 static void virtio_net_apply_guest_offloads(VirtIONet *n)
 {
-    tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
+    qemu_peer_set_offload(qemu_get_subqueue(n->nic, 0),
             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
@@ -1544,7 +1540,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     peer_test_vnet_hdr(n);
     if (peer_has_vnet_hdr(n)) {
         for (i = 0; i < n->max_queues; i++) {
-            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
+            qemu_peer_using_vnet_hdr(qemu_get_subqueue(n->nic, i), true);
         }
         n->host_hdr_len = sizeof(struct virtio_net_hdr);
     } else {

hw/net/vmxnet3.c

@@ -1290,7 +1290,7 @@ static void vmxnet3_update_features(VMXNET3State *s)
               s->lro_supported, rxcso_supported,
               s->rx_vlan_stripping);
     if (s->peer_has_vhdr) {
-        tap_set_offload(qemu_get_queue(s->nic)->peer,
+        qemu_peer_set_offload(qemu_get_queue(s->nic),
                         rxcso_supported,
                         s->lro_supported,
                         s->lro_supported,
@@ -1883,11 +1883,9 @@ static NetClientInfo net_vmxnet3_info = {
 
 static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
 {
-    NetClientState *peer = qemu_get_queue(s->nic)->peer;
+    NetClientState *nc = qemu_get_queue(s->nic);
 
-    if ((NULL != peer) &&
-        (peer->info->type == NET_CLIENT_OPTIONS_KIND_TAP) &&
-        tap_has_vnet_hdr(peer)) {
+    if (qemu_peer_has_vnet_hdr(nc)) {
         return true;
     }
@@ -1935,10 +1933,10 @@ static void vmxnet3_net_init(VMXNET3State *s)
     s->lro_supported = false;
 
     if (s->peer_has_vhdr) {
-        tap_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
+        qemu_peer_set_vnet_hdr_len(qemu_get_queue(s->nic),
             sizeof(struct virtio_net_hdr));
-        tap_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
+        qemu_peer_using_vnet_hdr(qemu_get_queue(s->nic), 1);
     }
 
     qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);