author     David S. Miller <davem@davemloft.net>  2016-03-13 22:08:01 -0400
committer  David S. Miller <davem@davemloft.net>  2016-03-13 22:08:01 -0400
commit     3c4ef85155acc8d7033d668536011f92c95f1065 (patch)
tree       d28919993d3d10c78120e7caa77fa21a923f20b9
parent     136ba622de49a6bf1f6e5eab3391ed5d5dbe30e3 (diff)
parent     8e4ee59c1e75b74966476dcc3552c3b30d2768e7 (diff)
Merge branch 'xen-netback-fix-multiple-extra-info-handling'
Paul Durrant says:

====================
xen-netback: fix multiple extra info handling

If a frontend passes multiple extra info fragments to netback on the guest
transmit side, only a single ack response will be sent because xen-netback
does not account for this properly. This will eventually cause processing of
the shared ring to wedge.

This series re-imports the canonical netif.h from Xen, where the ring
protocol documentation has been updated, fixes this issue in xen-netback and
also adds a patch to reduce log spam.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/xen-netback/common.h      1
-rw-r--r--  drivers/net/xen-netback/netback.c    65
-rw-r--r--  drivers/net/xen-netback/xenbus.c      2
-rw-r--r--  include/xen/interface/io/netif.h    861
4 files changed, 809 insertions(+), 120 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 112825200d41..f44b38846420 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t;
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
+ unsigned int extra_count;
/* Callback data for released SKBs. The callback is always
* xenvif_zerocopy_callback, desc contains the pending_idx, which is
* also an index in pending_tx_info array. It is initialized in
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 61b97c34bb3b..b42f26029225 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
@@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data)
}
static void xenvif_tx_err(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp, RING_IDX end)
+ struct xen_netif_tx_request *txp,
+ unsigned int extra_count, RING_IDX end)
{
RING_IDX cons = queue->tx.req_cons;
unsigned long flags;
do {
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
@@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
static int xenvif_count_requests(struct xenvif_queue *queue,
struct xen_netif_tx_request *first,
+ unsigned int extra_count,
struct xen_netif_tx_request *txp,
int work_to_do)
{
@@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
} while (more_data);
if (drop_err) {
- xenvif_tx_err(queue, first, cons + slots);
+ xenvif_tx_err(queue, first, extra_count, cons + slots);
return drop_err;
}
@@ -827,9 +830,10 @@ struct xenvif_tx_cb {
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
- u16 pending_idx,
- struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *mop)
+ u16 pending_idx,
+ struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+ struct gnttab_map_grant_ref *mop)
{
queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
@@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
memcpy(&queue->pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
+ queue->pending_tx_info[pending_idx].extra_count = extra_count;
}
static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
shinfo->nr_frags++, txp++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
- xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
}
@@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
shinfo->nr_frags++, txp++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
- xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+ gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
}
@@ -1095,8 +1101,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
}
static int xenvif_get_extras(struct xenvif_queue *queue,
- struct xen_netif_extra_info *extras,
- int work_to_do)
+ struct xen_netif_extra_info *extras,
+ unsigned int *extra_count,
+ int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = queue->tx.req_cons;
@@ -1109,9 +1116,12 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
}
RING_COPY_REQUEST(&queue->tx, cons, &extra);
+
+ queue->tx.req_cons = ++cons;
+ (*extra_count)++;
+
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
- queue->tx.req_cons = ++cons;
netdev_err(queue->vif->dev,
"Invalid extra type: %d\n", extra.type);
xenvif_fatal_tx_err(queue->vif);
@@ -1119,7 +1129,6 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
- queue->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
@@ -1294,6 +1303,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ unsigned int extra_count;
u16 pending_idx;
RING_IDX idx;
int work_to_do;
@@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
+ extra_count = 0;
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xenvif_get_extras(queue, extras,
+ &extra_count,
work_to_do);
idx = queue->tx.req_cons;
if (unlikely(work_to_do < 0))
@@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
- make_tx_response(queue, &txreq,
+ make_tx_response(queue, &txreq, extra_count,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
- make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+ make_tx_response(queue, &txreq, extra_count,
+ XEN_NETIF_RSP_OKAY);
push_tx_responses(queue);
continue;
}
- ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(queue, &txreq, extra_count,
+ txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(queue->vif->dev,
"Bad packet size: %d\n", txreq.size);
- xenvif_tx_err(queue, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
break;
}
@@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (unlikely(skb == NULL)) {
netdev_dbg(queue->vif->dev,
"Can't allocate a skb in start_xmit.\n");
- xenvif_tx_err(queue, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
break;
}
@@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
netdev_err(queue->vif->dev,
"Can't allocate the frag_list skb.\n");
@@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (data_len < txreq.size) {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
- xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+ extra_count, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
- memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
- sizeof(txreq));
+ memcpy(&queue->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ queue->pending_tx_info[pending_idx].extra_count =
+ extra_count;
}
queue->pending_cons++;
@@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, &pending_tx_info->req, status);
+ make_tx_response(queue, &pending_tx_info->req,
+ pending_tx_info->extra_count, status);
/* Release the pending index before pushing the Tx response so
* it's available before a new Tx request is pushed by the
@@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
s8 st)
{
RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue,
resp->id = txp->id;
resp->status = st;
- if (txp->flags & XEN_NETTXF_extra_info)
+ while (extra_count-- != 0)
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 39a303de20dd..bd182cd55dda 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -511,8 +511,6 @@ static void set_backend_state(struct backend_info *be,
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
- pr_info("%s: prepare for reconnect\n",
- be->dev->nodename);
backend_switch_state(be, XenbusStateInitWait);
break;
case XenbusStateClosing:
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 252ffd4801ef..4f20dbc42910 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -1,16 +1,34 @@
/******************************************************************************
- * netif.h
+ * xen_netif.h
*
* Unified network-device I/O interface for Xen guest OSes.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2003-2004, Keir Fraser
*/
-#ifndef __XEN_PUBLIC_IO_NETIF_H__
-#define __XEN_PUBLIC_IO_NETIF_H__
+#ifndef __XEN_PUBLIC_IO_XEN_NETIF_H__
+#define __XEN_PUBLIC_IO_XEN_NETIF_H__
-#include <xen/interface/io/ring.h>
-#include <xen/interface/grant_table.h>
+#include "ring.h"
+#include "../grant_table.h"
/*
* Older implementation of Xen network frontend / backend has an
@@ -38,10 +56,10 @@
* that it cannot safely queue packets (as it may not be kicked to send them).
*/
- /*
+/*
* "feature-split-event-channels" is introduced to separate guest TX
- * and RX notificaion. Backend either doesn't support this feature or
- * advertise it via xenstore as 0 (disabled) or 1 (enabled).
+ * and RX notification. Backend either doesn't support this feature or
+ * advertises it via xenstore as 0 (disabled) or 1 (enabled).
*
* To make use of this feature, frontend should allocate two event
* channels for TX and RX, advertise them to backend as
@@ -118,151 +136,804 @@
*/
/*
- * This is the 'wire' format for packets:
- * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
- * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info)
- * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE)
- * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data
- * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data
+ * "feature-multicast-control" and "feature-dynamic-multicast-control"
+ * advertise the capability to filter ethernet multicast packets in the
+ * backend. If the frontend wishes to take advantage of this feature then
+ * it may set "request-multicast-control". If the backend only advertises
+ * "feature-multicast-control" then "request-multicast-control" must be set
+ * before the frontend moves into the connected state. The backend will
+ * sample the value on this state transition and any subsequent change in
+ * value will have no effect. However, if the backend also advertises
+ * "feature-dynamic-multicast-control" then "request-multicast-control"
+ * may be set by the frontend at any time. In this case, the backend will
+ * watch the value and re-sample on watch events.
+ *
+ * If the sampled value of "request-multicast-control" is set then the
+ * backend transmit side should no longer flood multicast packets to the
+ * frontend, it should instead drop any multicast packet that does not
+ * match in a filter list.
+ * The list is amended by the frontend by sending dummy transmit requests
+ * containing XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL} extra-info fragments as
+ * specified below.
+ * Note that the filter list may be amended even if the sampled value of
+ * "request-multicast-control" is not set, however the filter should only
+ * be applied if it is set.
+ */
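
To make the dummy-request mechanism concrete, the following is a minimal
sketch of filling the two ring slots for a filter addition; it assumes the
xen_netif_tx_request and xen_netif_extra_info layouts defined later in this
header, and slot allocation, grant reference, offset and id handling are all
elided. queue_mcast_add() is a hypothetical helper, not part of the protocol.

    #include <string.h>

    /* 'req' and 'extra' are assumed to be two consecutive tx ring slots;
     * the extra-info struct simply overlays the second slot. */
    static void queue_mcast_add(struct xen_netif_tx_request *req,
                                struct xen_netif_extra_info *extra,
                                const uint8_t mac[6])
    {
        req->flags = XEN_NETTXF_extra_info;   /* an extra-info slot follows */
        req->size = 0;                        /* dummy request, no payload  */

        extra->type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
        extra->flags = 0;                     /* no XEN_NETIF_EXTRA_FLAG_MORE */
        memcpy(extra->u.mcast.addr, mac, 6);  /* address to admit in future */
    }
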
+
+/*
+ * Control ring
+ * ============
+ *
+ * Some features, such as hashing (detailed below), require a
+ * significant amount of out-of-band data to be passed from frontend to
+ * backend. Use of xenstore is not suitable for large quantities of data
+ * because of quota limitations and so a dedicated 'control ring' is used.
+ * The ability of the backend to use a control ring is advertised by
+ * setting:
+ *
+ * /local/domain/X/backend/<domid>/<vif>/feature-ctrl-ring = "1"
+ *
+ * The frontend provides a control ring to the backend by setting:
+ *
+ * /local/domain/<domid>/device/vif/<vif>/ctrl-ring-ref = <gref>
+ * /local/domain/<domid>/device/vif/<vif>/event-channel-ctrl = <port>
+ *
+ * where <gref> is the grant reference of the shared page used to
+ * implement the control ring and <port> is an event channel to be used
+ * as a mailbox interrupt. These keys must be set before the frontend
+ * moves into the connected state.
+ *
+ * The control ring uses a fixed request/response message size and is
+ * balanced (i.e. one request to one response), so operationally it is much
+ * the same as a transmit or receive ring.
+ * Note that there is no requirement that responses are issued in the same
+ * order as requests.
+ */
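
A rough frontend-side sketch of providing such a ring is shown below; the
grant and xenstore operations are OS-specific and only indicated in
comments, and ctrl_ring_setup() is a hypothetical helper.

    #include <xen/interface/io/ring.h>

    /* struct xen_netif_ctrl_front_ring comes from the DEFINE_RING_TYPES()
     * invocation further down in this header. */
    static struct xen_netif_ctrl_front_ring ctrl_ring;

    static void ctrl_ring_setup(void *page)
    {
        struct xen_netif_ctrl_sring *sring = page;

        SHARED_RING_INIT(sring);                        /* reset producers/consumers */
        FRONT_RING_INIT(&ctrl_ring, sring, PAGE_SIZE);  /* fixed slots, one page */

        /* Grant 'page' to the backend, then write the resulting <gref> to
         * ctrl-ring-ref and an unbound event channel <port> to
         * event-channel-ctrl before entering the connected state. */
    }
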
+
+/*
+ * Hash types
+ * ==========
+ *
+ * For the purposes of the definitions below, 'Packet[]' is an array of
+ * octets containing an IP packet without options, 'Array[X..Y]' means a
+ * sub-array of 'Array' containing bytes X thru Y inclusive, and '+' is
+ * used to indicate concatenation of arrays.
+ */
+
+/*
+ * A hash calculated over an IP version 4 header as follows:
+ *
+ * Buffer[0..7] = Packet[12..15] (source address) +
+ * Packet[16..19] (destination address)
+ *
+ * Result = Hash(Buffer, 8)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4 0
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV4 \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+
+/*
+ * A hash calculated over an IP version 4 header and TCP header as
+ * follows:
+ *
+ * Buffer[0..11] = Packet[12..15] (source address) +
+ * Packet[16..19] (destination address) +
+ * Packet[20..21] (source port) +
+ * Packet[22..23] (destination port)
+ *
+ * Result = Hash(Buffer, 12)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP 1
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+
+/*
+ * A hash calculated over an IP version 6 header as follows:
+ *
+ * Buffer[0..31] = Packet[8..23] (source address) +
+ * Packet[24..39] (destination address)
+ *
+ * Result = Hash(Buffer, 32)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6 2
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV6 \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+
+/*
+ * A hash calculated over an IP version 6 header and TCP header as
+ * follows:
+ *
+ * Buffer[0..35] = Packet[8..23] (source address) +
+ * Packet[24..39] (destination address) +
+ * Packet[40..41] (source port) +
+ * Packet[42..43] (destination port)
+ *
+ * Result = Hash(Buffer, 36)
+ */
+#define _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP 3
+#define XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP \
+ (1 << _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
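
For illustration, the IPV4_TCP input buffer described above could be
assembled as follows (a sketch assuming 'pkt' points at an options-free
IPv4 header, as these definitions require; the helper name is hypothetical):

    #include <stdint.h>
    #include <string.h>

    static void build_ipv4_tcp_hash_buf(const uint8_t *pkt, uint8_t buf[12])
    {
        memcpy(buf + 0,  pkt + 12, 4);  /* source address      */
        memcpy(buf + 4,  pkt + 16, 4);  /* destination address */
        memcpy(buf + 8,  pkt + 20, 2);  /* source port         */
        memcpy(buf + 10, pkt + 22, 2);  /* destination port    */
    }
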
+
+/*
+ * Hash algorithms
+ * ===============
+ */
+
+#define XEN_NETIF_CTRL_HASH_ALGORITHM_NONE 0
+
+/*
+ * Toeplitz hash:
+ */
+
+#define XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ 1
+
+/*
+ * This algorithm uses a 'key' as well as the data buffer itself.
+ * (Buffer[] and Key[] are treated as shift-registers where the MSB of
+ * Buffer/Key[0] is considered 'left-most' and the LSB of Buffer/Key[N-1]
+ * is the 'right-most').
+ *
+ * Value = 0
+ * For number of bits in Buffer[]
+ * If (left-most bit of Buffer[] is 1)
+ * Value ^= left-most 32 bits of Key[]
+ * Key[] << 1
+ * Buffer[] << 1
+ *
+ * The code below is provided for convenience where an operating system
+ * does not already provide an implementation.
+ */
+#ifdef XEN_NETIF_DEFINE_TOEPLITZ
+static uint32_t xen_netif_toeplitz_hash(const uint8_t *key,
+ unsigned int keylen,
+ const uint8_t *buf, unsigned int buflen)
+{
+ unsigned int keyi, bufi;
+ uint64_t prefix = 0;
+ uint64_t hash = 0;
+
+ /* Pre-load prefix with the first 8 bytes of the key */
+ for (keyi = 0; keyi < 8; keyi++) {
+ prefix <<= 8;
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ }
+
+ for (bufi = 0; bufi < buflen; bufi++) {
+ uint8_t byte = buf[bufi];
+ unsigned int bit;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ prefix <<= 1;
+ byte <<= 1;
+ }
+
+ /*
+ * 'prefix' has now been left-shifted by 8, so
+ * OR in the next byte.
+ */
+ prefix |= (keyi < keylen) ? key[keyi] : 0;
+ keyi++;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return hash >> 32;
+}
+#endif /* XEN_NETIF_DEFINE_TOEPLITZ */
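
A usage sketch, assuming XEN_NETIF_DEFINE_TOEPLITZ was defined before this
header was included and reusing the hypothetical buffer helper sketched
earlier; the 40-octet key length is a common RSS convention, not something
this header mandates:

    static unsigned int pick_queue(const uint8_t *pkt,
                                   const uint8_t key[40],
                                   unsigned int num_queues)
    {
        uint8_t buf[12];

        build_ipv4_tcp_hash_buf(pkt, buf);
        /* With no mapping table set, steering falls back to modular
         * arithmetic over the number of queues. */
        return xen_netif_toeplitz_hash(key, 40, buf, sizeof(buf)) % num_queues;
    }
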
+
+/*
+ * Control requests (struct xen_netif_ctrl_request)
+ * ================================================
+ *
+ * All requests have the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | type | data[0] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | data[1] | data[2] |
+ * +-----+-----+-----+-----+-----------------------+
+ *
+ * id: the request identifier, echoed in response.
+ * type: the type of request (see below)
+ * data[]: any data associated with the request (determined by type)
+ */
+
+struct xen_netif_ctrl_request {
+ uint16_t id;
+ uint16_t type;
+
+#define XEN_NETIF_CTRL_TYPE_INVALID 0
+#define XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS 1
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS 2
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_KEY 3
+#define XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE 4
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE 5
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING 6
+#define XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM 7
+
+ uint32_t data[3];
+};
+
+/*
+ * Control responses (struct xen_netif_ctrl_response)
+ * ==================================================
+ *
+ * All responses have the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | type | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | data |
+ * +-----+-----+-----+-----+
+ *
+ * id: the corresponding request identifier
+ * type: the type of the corresponding request
+ * status: the status of request processing
+ * data: any data associated with the response (determined by type and
+ * status)
+ */
+
+struct xen_netif_ctrl_response {
+ uint16_t id;
+ uint16_t type;
+ uint32_t status;
+
+#define XEN_NETIF_CTRL_STATUS_SUCCESS 0
+#define XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED 1
+#define XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER 2
+#define XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW 3
+
+ uint32_t data;
+};
+
+/*
+ * Control messages
+ * ================
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
+ * --------------------------------------
+ *
+ * This is sent by the frontend to set the desired hash algorithm.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
+ * data[0] = a XEN_NETIF_CTRL_HASH_ALGORITHM_* value
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - The algorithm is not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ *
+ * NOTE: Setting data[0] to XEN_NETIF_CTRL_HASH_ALGORITHM_NONE disables
+ * hashing and the backend is free to choose how it steers packets
+ * to queues (which is the default behaviour).
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
+ * ----------------------------------
+ *
+ * This is sent by the frontend to query the types of hash supported by
+ * the backend.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
+ * data[0] = 0
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = supported hash types (if operation was successful)
+ *
+ * NOTE: A valid hash algorithm must be selected before this operation can
+ * succeed.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
+ * ----------------------------------
+ *
+ * This is sent by the frontend to set the types of hash that the backend
+ * should calculate. (See above for hash type definitions).
+ * Note that the 'maximal' type of hash should always be chosen. For
+ * example, if the frontend sets both IPV4 and IPV4_TCP hash types then
+ * the latter hash type should be calculated for any TCP packet and the
+ * former only calculated for non-TCP packets.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
+ * data[0] = bitwise OR of XEN_NETIF_CTRL_HASH_TYPE_* values
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - One or more flag
+ * value is invalid or
+ * unsupported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: A valid hash algorithm must be selected before this operation can
+ * succeed.
+ * Also, setting data[0] to zero disables hashing and the backend
+ * is free to choose how it steers packets to queues.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
+ * --------------------------------
+ *
+ * This is sent by the frontend to set the key of the hash if the algorithm
+ * requires it. (See hash algorithms above).
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
+ * data[0] = grant reference of page containing the key (assumed to
+ * start at beginning of grant)
+ * data[1] = size of key in octets
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Key size is invalid
+ * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Key size is larger
+ * than the backend
+ * supports
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: Any key octets not specified are assumed to be zero (the key
+ * is assumed to be empty by default) and specifying a new key
+ * invalidates any previous key, hence specifying a key size of
+ * zero will clear the key (which ensures that the calculated hash
+ * will always be zero).
+ * The maximum size of key is algorithm and backend specific, but
+ * is also limited by the single grant reference.
+ * The grant reference may be read-only and must remain valid until
+ * the response has been processed.
+ *
+ * XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to query the maximum size of mapping
+ * table supported by the backend. The size is specified in terms of
+ * table entries.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
+ * data[0] = 0
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not supported
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = maximum number of entries allowed in the mapping table
+ * (if operation was successful) or zero if a mapping table is
+ * not supported (i.e. hash mapping is done only by modular
+ * arithmetic).
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * -----------------------------------------
+ *
+ * This is sent by the frontend to set the actual size of the mapping
+ * table to be used by the backend. The size is specified in terms of
+ * table entries.
+ * Any previous table is invalidated by this message and any new table
+ * is assumed to be zero filled.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * data[0] = number of entries in mapping table
+ * data[1] = 0
+ * data[2] = 0
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size is invalid
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: Setting data[0] to 0 means that hash mapping should be done
+ * using modular arithmetic.
+ *
+ * XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
+ * ------------------------------------
+ *
+ * This is sent by the frontend to set the content of the table mapping
+ * hash value to queue number. The backend should calculate the hash from
+ * the packet header, use it as an index into the table (modulo the size
+ * of the table) and then steer the packet to the queue number found at
+ * that index.
+ *
+ * Request:
+ *
+ * type = XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
+ * data[0] = grant reference of page containing the mapping (sub-)table
+ * (assumed to start at beginning of grant)
+ * data[1] = size of (sub-)table in entries
+ * data[2] = offset, in entries, of sub-table within overall table
+ *
+ * Response:
+ *
+ * status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED - Operation not
+ * supported
+ * XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER - Table size or content
+ * is invalid
+ * XEN_NETIF_CTRL_STATUS_BUFFER_OVERFLOW - Table size is larger
+ * than the backend
+ * supports
+ * XEN_NETIF_CTRL_STATUS_SUCCESS - Operation successful
+ * data = 0
+ *
+ * NOTE: The overall table has the following format:
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | mapping[0] | mapping[1] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | . |
+ * | . |
+ * | . |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | mapping[N-2] | mapping[N-1] |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * where N is specified by a XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
+ * message and each mapping must specify a queue between 0 and
+ * "multi-queue-num-queues" (see above).
+ * The backend may support a mapping table larger than can be
+ * mapped by a single grant reference. Thus sub-tables within a
+ * larger table can be individually set by sending multiple messages
+ * with differing offset values. Specifying a new sub-table does not
+ * invalidate any table data outside that range.
+ * The grant reference may be read-only and must remain valid until
+ * the response has been processed.
+ */
+
+DEFINE_RING_TYPES(xen_netif_ctrl,
+ struct xen_netif_ctrl_request,
+ struct xen_netif_ctrl_response);
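
Tying the above together, a frontend might issue one of these messages as in
the sketch below; 'ctrl_ring' is the front ring initialized in the earlier
sketch, and 'ctrl_port', 'next_id' and the event-channel kick are
OS-specific or hypothetical placeholders.

    static unsigned int ctrl_port;  /* event channel bound during setup */
    static uint16_t next_id;        /* request id, echoed in the response */

    static void ctrl_send(uint16_t type, uint32_t d0, uint32_t d1, uint32_t d2)
    {
        struct xen_netif_ctrl_request *req;
        int notify;

        req = RING_GET_REQUEST(&ctrl_ring, ctrl_ring.req_prod_pvt);
        req->id = next_id++;
        req->type = type;       /* e.g. XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM */
        req->data[0] = d0;
        req->data[1] = d1;
        req->data[2] = d2;
        ctrl_ring.req_prod_pvt++;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ctrl_ring, notify);
        if (notify)
            notify_remote_via_evtchn(ctrl_port);  /* OS-specific mailbox kick */
    }

The matching response is later consumed with RING_GET_RESPONSE() once
RING_HAS_UNCONSUMED_RESPONSES() reports work, matching on id since responses
need not arrive in request order.
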
+
+/*
+ * Guest transmit
+ * ==============
+ *
+ * This is the 'wire' format for transmit (frontend -> backend) packets:
+ *
+ * Fragment 1: xen_netif_tx_request_t - flags = XEN_NETTXF_*
+ * size = total packet size
+ * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include
+ * XEN_NETTXF_extra_info)
+ * ...
+ * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include
+ * XEN_NETIF_EXTRA_MORE)
* ...
- * Request N: xen_netif_tx_request -- 0
+ * Fragment N: xen_netif_tx_request_t - (only if fragment N-1 flags include
+ * XEN_NETTXF_more_data - flags on preceding
+ * extras are not relevant here)
+ * flags = 0
+ * size = fragment size
+ *
+ * NOTE:
+ *
+ * This format is slightly different from that used for receive
+ * (backend -> frontend) packets. Specifically, in a multi-fragment
+ * packet the actual size of fragment 1 can only be determined by
+ * subtracting the sizes of fragments 2..N from the total packet size.
+ *
+ * Ring slot size is 12 octets, however not all request/response
+ * structs use the full size.
+ *
+ * tx request data (xen_netif_tx_request_t)
+ * ------------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | grant ref | offset | flags |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | size |
+ * +-----+-----+-----+-----+
+ *
+ * grant ref: Reference to buffer page.
+ * offset: Offset within buffer page.
+ * flags: XEN_NETTXF_*.
+ * id: request identifier, echoed in response.
+ * size: packet size in bytes.
+ *
+ * tx response (xen_netif_tx_response_t)
+ * ---------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | status | unused |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | unused |
+ * +-----+-----+-----+-----+
+ *
+ * id: reflects id in transmit request
+ * status: XEN_NETIF_RSP_*
+ *
+ * Guest receive
+ * =============
+ *
+ * This is the 'wire' format for receive (backend -> frontend) packets:
+ *
+ * Fragment 1: xen_netif_rx_request_t - flags = XEN_NETRXF_*
+ * size = fragment size
+ * [Extra 1: xen_netif_extra_info_t] - (only if fragment 1 flags include
+ * XEN_NETRXF_extra_info)
+ * ...
+ * [Extra N: xen_netif_extra_info_t] - (only if extra N-1 flags include
+ * XEN_NETIF_EXTRA_MORE)
+ * ...
+ * Fragment N: xen_netif_rx_request_t - (only if fragment N-1 flags include
+ * XEN_NETRXF_more_data - flags on preceding
+ * extras are not relevant here)
+ * flags = 0
+ * size = fragment size
+ *
+ * NOTE:
+ *
+ * This format is slightly different from that used for transmit
+ * (frontend -> backend) packets. Specifically, in a multi-fragment
+ * packet the size of the packet can only be determined by summing the
+ * sizes of fragments 1..N.
+ *
+ * Ring slot size is 8 octets.
+ *
+ * rx request (xen_netif_rx_request_t)
+ * -------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | pad | gref |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: request identifier, echoed in response.
+ * gref: reference to incoming granted frame.
+ *
+ * rx response (xen_netif_rx_response_t)
+ * ---------------------------------
+ *
+ * 0 1 2 3 4 5 6 7 octet
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | id | offset | flags | status |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * id: reflects id in receive request
+ * offset: offset in page of start of received packet
+ * flags: XEN_NETRXF_*
+ * status: -ve: XEN_NETIF_RSP_*; +ve: Rx'ed pkt size.
+ *
+ * NOTE: Historically, to support GSO on the frontend receive side, Linux
+ * netfront does not make use of the rx response id (because, as
+ * described below, extra info structures overlay the id field).
+ * Instead it assumes that responses always appear in the same ring
+ * slot as their corresponding request. Thus, to maintain
+ * compatibility, backends must make sure this is the case.
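
A backend-side sketch of honouring the same-slot rule: if responses are
produced strictly in request order, rsp_prod_pvt always names the slot the
corresponding request occupied. put_rx_response() is a hypothetical helper,
similar in spirit to xen-netback's internal response handling.

    static void put_rx_response(struct xen_netif_rx_back_ring *ring,
                                uint16_t id, uint16_t offset,
                                uint16_t flags, int16_t status)
    {
        struct xen_netif_rx_response *rsp =
            RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);

        rsp->id = id;           /* echoed, though netfront ignores it */
        rsp->offset = offset;
        rsp->flags = flags;
        rsp->status = status;

        ring->rsp_prod_pvt++;   /* pushed later via
                                   RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() */
    }
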
+ *
+ * Extra Info
+ * ==========
+ *
+ * Can be present if initial request or response has NET{T,R}XF_extra_info,
+ * or previous extra request has XEN_NETIF_EXTRA_MORE.
+ *
+ * The struct therefore needs to fit into either a tx or rx slot and
+ * is therefore limited to 8 octets.
+ *
+ * NOTE: Because extra info data overlays the usual request/response
+ * structures, there is no id information in the opposite direction.
+ * So, if an extra i