net: remove implicit peer from offload API

The virtio_net offload APIs are used on the NIC's peer (i.e. the tap
device).  The API was defined to implicitly use nc->peer, saving the
caller the trouble.

This wasn't ideal because:
1. There are callers who have the peer but not the NIC.  Currently they
   are forced to bypass the API and access peer->info->... directly.
2. The rest of the net.h API uses nc, not nc->peer, so it is
   inconsistent.

This patch pushes nc->peer back up to callers.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Commit: d6085e3ace (parent 0a985b3727)
Author: Stefan Hajnoczi, 2014-02-20 12:14:07 +01:00
4 changed files with 40 additions and 40 deletions

View file

@@ -325,7 +325,7 @@ static void peer_test_vnet_hdr(VirtIONet *n)
         return;
     }

-    n->has_vnet_hdr = qemu_peer_has_vnet_hdr(nc);
+    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
 }

 static int peer_has_vnet_hdr(VirtIONet *n)
@@ -338,7 +338,7 @@ static int peer_has_ufo(VirtIONet *n)
     if (!peer_has_vnet_hdr(n))
        return 0;

-    n->has_ufo = qemu_peer_has_ufo(qemu_get_queue(n->nic));
+    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
 }
@@ -357,8 +357,8 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
         nc = qemu_get_subqueue(n->nic, i);

         if (peer_has_vnet_hdr(n) &&
-            qemu_peer_has_vnet_hdr_len(nc, n->guest_hdr_len)) {
-            qemu_peer_set_vnet_hdr_len(nc, n->guest_hdr_len);
+            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
+            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
             n->host_hdr_len = n->guest_hdr_len;
         }
     }
@@ -459,7 +459,7 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)

 static void virtio_net_apply_guest_offloads(VirtIONet *n)
 {
-    qemu_peer_set_offload(qemu_get_subqueue(n->nic, 0),
+    qemu_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
         !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
         !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
         !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
@@ -1540,7 +1540,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     peer_test_vnet_hdr(n);
     if (peer_has_vnet_hdr(n)) {
         for (i = 0; i < n->max_queues; i++) {
-            qemu_peer_using_vnet_hdr(qemu_get_subqueue(n->nic, i), true);
+            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
         }
         n->host_hdr_len = sizeof(struct virtio_net_hdr);
     } else {

View file

@@ -1290,12 +1290,12 @@ static void vmxnet3_update_features(VMXNET3State *s)
               s->lro_supported, rxcso_supported,
               s->rx_vlan_stripping);

     if (s->peer_has_vhdr) {
-        qemu_peer_set_offload(qemu_get_queue(s->nic),
-                              rxcso_supported,
-                              s->lro_supported,
-                              s->lro_supported,
-                              0,
-                              0);
+        qemu_set_offload(qemu_get_queue(s->nic)->peer,
+                         rxcso_supported,
+                         s->lro_supported,
+                         s->lro_supported,
+                         0,
+                         0);
     }
 }
@@ -1885,7 +1885,7 @@ static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
 {
     NetClientState *nc = qemu_get_queue(s->nic);

-    if (qemu_peer_has_vnet_hdr(nc)) {
+    if (qemu_has_vnet_hdr(nc->peer)) {
         return true;
     }
@@ -1933,10 +1933,10 @@ static void vmxnet3_net_init(VMXNET3State *s)
     s->lro_supported = false;

     if (s->peer_has_vhdr) {
-        qemu_peer_set_vnet_hdr_len(qemu_get_queue(s->nic),
-                                   sizeof(struct virtio_net_hdr));
-        qemu_peer_using_vnet_hdr(qemu_get_queue(s->nic), 1);
+        qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
+                              sizeof(struct virtio_net_hdr));
+        qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
     }

     qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);

View file

@@ -132,13 +132,13 @@ ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
 void qemu_purge_queued_packets(NetClientState *nc);
 void qemu_flush_queued_packets(NetClientState *nc);
 void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
-bool qemu_peer_has_ufo(NetClientState *nc);
-bool qemu_peer_has_vnet_hdr(NetClientState *nc);
-bool qemu_peer_has_vnet_hdr_len(NetClientState *nc, int len);
-void qemu_peer_using_vnet_hdr(NetClientState *nc, bool enable);
-void qemu_peer_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
-                           int ecn, int ufo);
-void qemu_peer_set_vnet_hdr_len(NetClientState *nc, int len);
+bool qemu_has_ufo(NetClientState *nc);
+bool qemu_has_vnet_hdr(NetClientState *nc);
+bool qemu_has_vnet_hdr_len(NetClientState *nc, int len);
+void qemu_using_vnet_hdr(NetClientState *nc, bool enable);
+void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
+                      int ecn, int ufo);
+void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
 void qemu_macaddr_default_if_unset(MACAddr *macaddr);
 int qemu_show_nic_models(const char *arg, const char *const *models);
 void qemu_check_nic_model(NICInfo *nd, const char *model);

View file

@@ -378,59 +378,59 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
     }
 }

-bool qemu_peer_has_ufo(NetClientState *nc)
+bool qemu_has_ufo(NetClientState *nc)
 {
-    if (!nc->peer || !nc->peer->info->has_ufo) {
+    if (!nc || !nc->info->has_ufo) {
         return false;
     }

-    return nc->peer->info->has_ufo(nc->peer);
+    return nc->info->has_ufo(nc);
 }

-bool qemu_peer_has_vnet_hdr(NetClientState *nc)
+bool qemu_has_vnet_hdr(NetClientState *nc)
 {
-    if (!nc->peer || !nc->peer->info->has_vnet_hdr) {
+    if (!nc || !nc->info->has_vnet_hdr) {
         return false;
     }

-    return nc->peer->info->has_vnet_hdr(nc->peer);
+    return nc->info->has_vnet_hdr(nc);
 }

-bool qemu_peer_has_vnet_hdr_len(NetClientState *nc, int len)
+bool qemu_has_vnet_hdr_len(NetClientState *nc, int len)
 {
-    if (!nc->peer || !nc->peer->info->has_vnet_hdr_len) {
+    if (!nc || !nc->info->has_vnet_hdr_len) {
         return false;
     }

-    return nc->peer->info->has_vnet_hdr_len(nc->peer, len);
+    return nc->info->has_vnet_hdr_len(nc, len);
 }

-void qemu_peer_using_vnet_hdr(NetClientState *nc, bool enable)
+void qemu_using_vnet_hdr(NetClientState *nc, bool enable)
 {
-    if (!nc->peer || !nc->peer->info->using_vnet_hdr) {
+    if (!nc || !nc->info->using_vnet_hdr) {
         return;
     }

-    nc->peer->info->using_vnet_hdr(nc->peer, enable);
+    nc->info->using_vnet_hdr(nc, enable);
 }

-void qemu_peer_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
-                           int ecn, int ufo)
+void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
+                      int ecn, int ufo)
 {
-    if (!nc->peer || !nc->peer->info->set_offload) {
+    if (!nc || !nc->info->set_offload) {
         return;
     }

-    nc->peer->info->set_offload(nc->peer, csum, tso4, tso6, ecn, ufo);
+    nc->info->set_offload(nc, csum, tso4, tso6, ecn, ufo);
 }

-void qemu_peer_set_vnet_hdr_len(NetClientState *nc, int len)
+void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
 {
-    if (!nc->peer || !nc->peer->info->set_vnet_hdr_len) {
+    if (!nc || !nc->info->set_vnet_hdr_len) {
         return;
     }

-    nc->peer->info->set_vnet_hdr_len(nc->peer, len);
+    nc->info->set_vnet_hdr_len(nc, len);
 }

 int qemu_can_send_packet(NetClientState *sender)