From 8536aa06f7d7d0eaed112b869ea07cba75eb05d8 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Tue, 19 Jul 2016 10:56:17 +0300
Subject: fsl/fman: split lines over 80 characters

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman_muram.c | 3 ++-
 drivers/net/ethernet/freescale/fman/fman_muram.h | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 47394c45b6e8..5ec94d243da0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -150,7 +150,8 @@ unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
  *
  * Free an allocated memory from FM-MURAM partition.
  */
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+			 size_t size)
 {
 	unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
 
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 889649ad8931..453bf849eee1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -46,6 +46,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
 
 unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
 
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+			 size_t size);
 
 #endif /* __FM_MURAM_EXT */
--
cgit v1.2.3

From 29c4684e76193bc318305b5d8ebe40a4141f029e Mon Sep 17 00:00:00 2001
From: Igal Liberman
Date: Sat, 9 Jan 2016 23:16:33 +0200
Subject: fsl/fman: fix loadable module compilation

Signed-off-by: Igal Liberman
---
 drivers/net/ethernet/freescale/fman/Makefile    | 10 ++++---
 drivers/net/ethernet/freescale/fman/fman.c      | 35 +++++++++++++++++++++++--
 drivers/net/ethernet/freescale/fman/fman_port.c | 23 +++++++++++++++-
 drivers/net/ethernet/freescale/fman/fman_sp.c   |  3 +++
 4 files changed, 64 insertions(+), 7 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
index 51fd2e6c1b84..60491779e49f 100644
--- a/drivers/net/ethernet/freescale/fman/Makefile
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -1,7 +1,9 @@
 subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
 
-obj-y += fsl_fman.o fsl_fman_mac.o fsl_mac.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
+obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
 
-fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_port.o
-fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o
-fsl_mac-objs += mac.o
+fsl_fman-objs := fman_muram.o fman.o fman_sp.o
+fsl_fman_port-objs := fman_port.o
+fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 1de2e1e51c2b..ef5f22862ef3 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2115,6 +2115,7 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
 	fman->intr_mng[event].isr_cb = isr_cb;
 	fman->intr_mng[event].src_handle = src_arg;
 }
+EXPORT_SYMBOL(fman_register_intr);
 
 /**
  * fman_unregister_intr
@@ -2138,6 +2139,7 @@ void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
 	fman->intr_mng[event].isr_cb = NULL;
 	fman->intr_mng[event].src_handle = NULL;
 }
+EXPORT_SYMBOL(fman_unregister_intr);
 
 /**
  * fman_set_port_params
@@ -2241,6 +2243,7 @@ return_err:
 	spin_unlock_irqrestore(&fman->spinlock, flags);
 	return err;
 }
+EXPORT_SYMBOL(fman_set_port_params);
 
 /**
  * fman_reset_mac
@@ -2310,6 +2313,7 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
 
 	return 0;
 }
+EXPORT_SYMBOL(fman_reset_mac);
 
 /**
  * fman_set_mac_max_frame
@@ -2337,6 +2341,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(fman_set_mac_max_frame);
 
 /**
  * fman_get_clock_freq
@@ -2363,6 +2368,7 @@ u32 fman_get_bmi_max_fifo_size(struct fman *fman)
 {
 	return fman->state->bmi_max_fifo_size;
 }
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
 
 /**
  * fman_get_revision
@@ -2384,6 +2390,7 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
 			FPM_REV1_MAJOR_SHIFT);
 	rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
 }
+EXPORT_SYMBOL(fman_get_revision);
 
 /**
  * fman_get_qman_channel_id
@@ -2419,6 +2426,7 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
 
 	return fman->state->qman_channel_base + i;
 }
+EXPORT_SYMBOL(fman_get_qman_channel_id);
 
 /**
  * fman_get_mem_region
@@ -2432,6 +2440,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
 {
 	return fman->state->res;
 }
+EXPORT_SYMBOL(fman_get_mem_region);
 
 /* Bootargs defines */
 /* Extra headroom for RX buffers - Default, min and max */
@@ -2538,6 +2547,7 @@ struct fman *fman_bind(struct device *fm_dev)
 {
 	return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
 }
+EXPORT_SYMBOL(fman_bind);
 
 static irqreturn_t fman_err_irq(int irq, void *handle)
 {
@@ -2930,7 +2940,7 @@ static const struct of_device_id fman_match[] = {
 	{}
 };
 
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
 
 static struct platform_driver fman_driver = {
 	.driver = {
@@ -2940,4 +2950,25 @@ static struct platform_driver fman_driver = {
 	.probe = fman_probe,
 };
 
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+	int err;
+
+	pr_debug("FSL DPAA FMan driver\n");
+
+	err = platform_driver_register(&fman_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+	platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 70c198d072dc..6de808e2593a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1775,4 +1775,25 @@ static struct platform_driver fman_port_driver = {
 	.probe = fman_port_probe,
 };
 
-builtin_platform_driver(fman_port_driver);
+static int __init fman_port_load(void)
+{
+	int err;
+
+	pr_debug("FSL DPAA FMan driver\n");
+
+	err = platform_driver_register(&fman_port_driver);
+	if (err < 0)
+		pr_err("Error, platform_driver_register() = %d\n", err);
+
+	return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+	platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index f9e7aa385cba..248f5bcca468 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -80,6 +80,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
 		}
 	}
 }
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
 
 int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
 				int_context_data_copy,
@@ -164,3 +165,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
 
 	return 0;
 }
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
+
--
cgit v1.2.3

From 5df6f7fa47e0306bdbb94ac7f3545697ff92d7d2 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Tue, 22 Mar 2016 10:27:16 +0200
Subject: fsl/fman: small fixes

Make module params static, proper NULL checks, remove __iomem label
when misused.

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman.c      | 6 +++---
 drivers/net/ethernet/freescale/fman/fman_port.c | 8 +++++---
 2 files changed, 8 insertions(+), 6 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ef5f22862ef3..fb2574878958 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2462,7 +2462,7 @@ EXPORT_SYMBOL(fman_get_mem_region);
  * particular forwarding scenarios that add extra headers to the
  * forwarded frame.
  */
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
 module_param(fsl_fm_rx_extra_headroom, int, 0);
 MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
 
@@ -2475,7 +2475,7 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
  * Could be overridden once, at boot-time, via the
  * fm_set_max_frm() callback.
  */
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
 module_param(fsl_fm_max_frm, int, 0);
 MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
 
@@ -2868,7 +2868,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 
 	fman->dts_params.base_addr =
 		devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
-	if (fman->dts_params.base_addr == 0) {
+	if (!fman->dts_params.base_addr) {
 		dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
 		goto fman_free;
 	}
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 6de808e2593a..8b043e7b070b 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1477,7 +1477,8 @@ EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
  */
 int fman_port_disable(struct fman_port *port)
 {
-	u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+	u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+	u32 tmp;
 	bool rx_port, failure = false;
 	int count;
 
@@ -1553,7 +1554,8 @@ EXPORT_SYMBOL(fman_port_disable);
  */
 int fman_port_enable(struct fman_port *port)
 {
-	u32 __iomem *bmi_cfg_reg, tmp;
+	u32 __iomem *bmi_cfg_reg;
+	u32 tmp;
 	bool rx_port;
 
 	if (!is_init_done(port->cfg))
@@ -1743,7 +1745,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 
 	port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
 						  resource_size(&res));
-	if (port->dts_params.base_addr == 0)
+	if (!port->dts_params.base_addr)
 		dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
 
 	dev_set_drvdata(&of_dev->dev, port);
--
cgit v1.2.3

From 6fa8519274db638fff104fffeadbffc089499244 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Thu, 7 Apr 2016 12:50:16 +0300
Subject: fsl/fman: use of_get_phy_mode()

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/mac.c | 33 ++-----------------------------
 1 file changed, 2 insertions(+), 31 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index e33d9d24c1db..f94fad7884df 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -583,31 +583,6 @@ static void setup_memac(struct mac_device *mac_dev)
 
 static DEFINE_MUTEX(eth_lock);
 
-static const char phy_str[][11] = {
-	[PHY_INTERFACE_MODE_MII] = "mii",
-	[PHY_INTERFACE_MODE_GMII] = "gmii",
-	[PHY_INTERFACE_MODE_SGMII] = "sgmii",
-	[PHY_INTERFACE_MODE_TBI] = "tbi",
-	[PHY_INTERFACE_MODE_RMII] = "rmii",
-	[PHY_INTERFACE_MODE_RGMII] = "rgmii",
-	[PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
-	[PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
-	[PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
-	[PHY_INTERFACE_MODE_RTBI] = "rtbi",
-	[PHY_INTERFACE_MODE_XGMII] = "xgmii"
-};
-
-static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(phy_str); i++)
-		if (strcmp(str, phy_str[i]) == 0)
-			return (phy_interface_t)i;
-
-	return PHY_INTERFACE_MODE_MII;
-}
-
 static const u16 phy2speed[] = {
 	[PHY_INTERFACE_MODE_MII] = SPEED_100,
 	[PHY_INTERFACE_MODE_GMII] = SPEED_1000,
@@ -686,7 +661,6 @@ static int mac_probe(struct platform_device *_of_dev)
 	struct resource res;
 	struct mac_priv_s *priv;
 	const u8 *mac_addr;
-	const char *char_prop;
 	const u32 *u32_prop;
 	u8 fman_id;
 
@@ -870,15 +844,12 @@ static int mac_probe(struct platform_device *_of_dev)
 	}
 
 	/* Get the PHY connection type */
-	char_prop = (const char *)of_get_property(mac_node,
						  "phy-connection-type", NULL);
-	if (!char_prop) {
+	priv->phy_if = of_get_phy_mode(mac_node);
+	if (priv->phy_if < 0) {
 		dev_warn(dev,
			 "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
			 mac_node->full_name);
 		priv->phy_if = PHY_INTERFACE_MODE_MII;
-	} else {
-		priv->phy_if = str2phy(char_prop);
 	}
 
 	priv->speed = phy2speed[priv->phy_if];
--
cgit v1.2.3

From 537a31658f8a01d635eb628eff5895672ac03981 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Mon, 16 May 2016 16:57:14 +0300
Subject: fsl/fman: simplify device tree reads

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman.c      | 30 ++++++++++---------------
 drivers/net/ethernet/freescale/fman/fman_port.c | 24 ++++++--------------
 drivers/net/ethernet/freescale/fman/mac.c       | 24 +++++++++-----------
 3 files changed, 30 insertions(+), 48 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index fb2574878958..2278bbd6bdfe 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2737,8 +2737,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 	struct fman *fman;
 	struct device_node *fm_node, *muram_node;
 	struct resource *res;
-	const u32 *u32_prop;
-	int lenp, err, irq;
+	u32 val, range[2];
+	int err, irq;
 	struct clk *clk;
 	u32 clk_rate;
 	phys_addr_t phys_base_addr;
@@ -2750,16 +2750,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 
 	fm_node = of_node_get(of_dev->dev.of_node);
 
-	u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(fm_node, "cell-index", &val);
+	if (err) {
+		dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
			__func__, fm_node->full_name);
		goto fman_node_put;
	}
-	if (WARN_ON(lenp != sizeof(u32)))
-		goto fman_node_put;
-
-	fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);
+	fman->dts_params.id = (u8)val;
 
 	/* Get the FM interrupt */
 	res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
@@ -2806,18 +2803,15 @@
 	/* Rounding to MHz */
 	fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
 
-	u32_prop = (const u32 *)of_get_property(fm_node,
						"fsl,qman-channel-range",
						&lenp);
-	if (!u32_prop) {
-		dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
+	err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
					 &range[0], 2);
+	if (err) {
+		dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
			__func__, fm_node->full_name);
		goto fman_node_put;
	}
-	if (WARN_ON(lenp != sizeof(u32) * 2))
-		goto fman_node_put;
-	fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
-	fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);
+	fman->dts_params.qman_channel_base = range[0];
+	fman->dts_params.num_of_qman_channels = range[1];
 
 	/* Get the MURAM base address and size */
 	muram_node = of_find_matching_node(fm_node, fman_muram_match);
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 8b043e7b070b..9f3bb50a2365 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1625,7 +1625,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	struct device_node *fm_node, *port_node;
 	struct resource res;
 	struct resource *dev_res;
-	const u32 *u32_prop;
+	u32 val;
 	int err = 0, lenp;
 	enum fman_port_type port_type;
 	u16 port_speed;
@@ -1654,28 +1654,20 @@ static int fman_port_probe(struct platform_device *of_dev)
		goto return_err;
	}
 
-	u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(port_node, "cell-index", &val);
+	if (err) {
+		dev_err(port->dev, "%s: reading cell-index for %s failed\n",
			__func__, port_node->full_name);
		err = -EINVAL;
		goto return_err;
	}
-	if (WARN_ON(lenp != sizeof(u32))) {
-		err = -EINVAL;
-		goto return_err;
-	}
-	port_id = (u8)fdt32_to_cpu(u32_prop[0]);
-
+	port_id = (u8)val;
 	port->dts_params.id = port_id;
 
	if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
		port_type = FMAN_PORT_TYPE_TX;
		port_speed = 1000;
-		u32_prop = (const u32 *)of_get_property(port_node,
							"fsl,fman-10g-port",
							&lenp);
-		if (u32_prop)
+		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
			port_speed = 10000;
 
	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1688,9 +1680,7 @@ static int fman_port_probe(struct platform_device *of_dev)
	} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
		port_type = FMAN_PORT_TYPE_RX;
		port_speed = 1000;
-		u32_prop = (const u32 *)of_get_property(port_node,
							"fsl,fman-10g-port", &lenp);
-		if (u32_prop)
+		if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
			port_speed = 10000;
 
	} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index f94fad7884df..dc04e617af8d 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -653,7 +653,7 @@ MODULE_DEVICE_TABLE(of, mac_match);
 
 static int mac_probe(struct platform_device *_of_dev)
 {
-	int err, i, lenp, nph;
+	int err, i, nph;
 	struct device *dev;
 	struct device_node *mac_node, *dev_node;
 	struct mac_device *mac_dev;
@@ -661,7 +661,7 @@ static int mac_probe(struct platform_device *_of_dev)
 	struct resource res;
 	struct mac_priv_s *priv;
 	const u8 *mac_addr;
-	const u32 *u32_prop;
+	u32 val;
 	u8 fman_id;
 
 	dev = &_of_dev->dev;
@@ -723,16 +723,15 @@ static int mac_probe(struct platform_device *_of_dev)
	}
 
	/* Get the FMan cell-index */
-	u32_prop = of_get_property(dev_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(dev_node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "failed to read cell-index for %s\n",
			dev_node->full_name);
		err = -EINVAL;
		goto _return_of_node_put;
	}
-	WARN_ON(lenp != sizeof(u32));
	/* cell-index 0 => FMan id 1 */
-	fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1);
+	fman_id = (u8)(val + 1);
 
	priv->fman = fman_bind(&of_dev->dev);
	if (!priv->fman) {
@@ -779,15 +778,14 @@ static int mac_probe(struct platform_device *_of_dev)
	}
 
	/* Get the cell-index */
-	u32_prop = of_get_property(mac_node, "cell-index", &lenp);
-	if (!u32_prop) {
-		dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+	err = of_property_read_u32(mac_node, "cell-index", &val);
+	if (err) {
+		dev_err(dev, "failed to read cell-index for %s\n",
			mac_node->full_name);
		err = -EINVAL;
		goto _return_dev_set_drvdata;
	}
-	WARN_ON(lenp != sizeof(u32));
-	priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]);
+	priv->cell_index = (u8)val;
 
	/* Get the MAC address */
	mac_addr = of_get_mac_address(mac_node);
@@ -847,7 +845,7 @@ static int mac_probe(struct platform_device *_of_dev)
	priv->phy_if = of_get_phy_mode(mac_node);
	if (priv->phy_if < 0) {
		dev_warn(dev,
-			 "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+			 "of_get_phy_mode() for %s failed. Defaulting to MII\n",
			 mac_node->full_name);
		priv->phy_if = PHY_INTERFACE_MODE_MII;
	}
--
cgit v1.2.3

From 73c364e110015690f13336544a51aab685bd5f23 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Tue, 24 May 2016 16:33:54 +0300
Subject: fsl/fman: return a phy_dev pointer from init

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/mac.c | 22 +++++++++++-----------
 drivers/net/ethernet/freescale/fman/mac.h |  3 ++-
 2 files changed, 13 insertions(+), 12 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index dc04e617af8d..e4378c2c1e6a 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -469,9 +469,9 @@ static void adjust_link_memac(struct net_device *net_dev)
 /* Initializes driver's PHY state, and attaches to the PHY.
  * Returns 0 on success.
  */
-static int init_phy(struct net_device *net_dev,
-		    struct mac_device *mac_dev,
-		    void (*adj_lnk)(struct net_device *))
+static struct phy_device *init_phy(struct net_device *net_dev,
+				   struct mac_device *mac_dev,
+				   void (*adj_lnk)(struct net_device *))
 {
 	struct phy_device *phy_dev;
 	struct mac_priv_s *priv = mac_dev->priv;
@@ -480,7 +480,7 @@ static int init_phy(struct net_device *net_dev,
			       priv->phy_if);
	if (!phy_dev) {
		netdev_err(net_dev, "Could not connect to PHY\n");
-		return -ENODEV;
+		return NULL;
	}
 
	/* Remove any features not supported by the controller */
@@ -493,23 +493,23 @@ static int init_phy(struct net_device *net_dev,
 
	mac_dev->phy_dev = phy_dev;
 
-	return 0;
+	return phy_dev;
 }
 
-static int dtsec_init_phy(struct net_device *net_dev,
-			  struct mac_device *mac_dev)
+static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
+					 struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
 }
 
-static int tgec_init_phy(struct net_device *net_dev,
-			 struct mac_device *mac_dev)
+static struct phy_device *tgec_init_phy(struct net_device *net_dev,
+					struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, adjust_link_void);
 }
 
-static int memac_init_phy(struct net_device *net_dev,
-			  struct mac_device *mac_dev)
+static struct phy_device *memac_init_phy(struct net_device *net_dev,
+					 struct mac_device *mac_dev)
 {
 	return init_phy(net_dev, mac_dev, &adjust_link_memac);
 }
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 0211cc9a46d6..d7313f0c5135 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -58,7 +58,8 @@ struct mac_device {
 	bool tx_pause_active;
 	bool promisc;
 
-	int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+	struct phy_device *(*init_phy)(struct net_device *net_dev,
+				       struct mac_device *mac_dev);
 	int (*init)(struct mac_device *mac_dev);
 	int (*start)(struct mac_device *mac_dev);
 	int (*stop)(struct mac_device *mac_dev);
--
cgit v1.2.3

From 44045e45abbda3e6db7ca0ef3e460e6ed03419c9 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Fri, 24 Jun 2016 12:25:05 +0300
Subject: fsl/fman: MEMAC may use QSGMII PHY interface mode

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman_memac.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 45e98fd8b79e..96dfe7eb4e5f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1151,7 +1151,8 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
 	/* Save FMan revision */
 	fman_get_revision(memac->fm, &memac->fm_rev_info);
 
-	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+	if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+	    memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
 		if (!params->internal_phy_node) {
 			pr_err("PCS PHY node is not available\n");
 			memac_free(memac);
--
cgit v1.2.3

From 47256192c65b786ea9dbebf47f45b938a02e3914 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Tue, 28 Jun 2016 15:32:44 +0300
Subject: fsl/fman: check pcsphy pointer before use

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman_memac.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 96dfe7eb4e5f..53ef51e3bd9e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -507,6 +507,9 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac,
 {
 	u16 tmp_reg16;
 
+	if (WARN_ON(!memac->pcsphy))
+		return;
+
 	/* SGMII mode */
 	tmp_reg16 = IF_MODE_SGMII_EN;
 	if (!fixed_link)
--
cgit v1.2.3

From 604104fc549a32ae928435a48de6761af13836ea Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Thu, 30 Jun 2016 16:48:05 +0300
Subject: fsl/fman: check of_get_phy_mode() return value

For unknown compatibles avoid crashing and default to SGMII.

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/mac.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index e4378c2c1e6a..8fe6b3e253fa 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -663,6 +663,7 @@ static int mac_probe(struct platform_device *_of_dev)
 	const u8 *mac_addr;
 	u32 val;
 	u8 fman_id;
+	int phy_if;
 
 	dev = &_of_dev->dev;
 	mac_node = dev->of_node;
@@ -842,13 +843,14 @@ static int mac_probe(struct platform_device *_of_dev)
 	}
 
 	/* Get the PHY connection type */
-	priv->phy_if = of_get_phy_mode(mac_node);
-	if (priv->phy_if < 0) {
+	phy_if = of_get_phy_mode(mac_node);
+	if (phy_if < 0) {
 		dev_warn(dev,
-			 "of_get_phy_mode() for %s failed. Defaulting to MII\n",
+			 "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
			 mac_node->full_name);
-		priv->phy_if = PHY_INTERFACE_MODE_MII;
+		phy_if = PHY_INTERFACE_MODE_SGMII;
	}
+	priv->phy_if = phy_if;
 
	priv->speed = phy2speed[priv->phy_if];
	priv->max_speed = priv->speed;
--
cgit v1.2.3

From 73912d51d6c80096e95d29118b48c53393890bf2 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Thu, 7 Jul 2016 15:25:24 +0300
Subject: fsl/fman: simplify redundant condition

Change suggested by David Binderman, thanks.

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 2278bbd6bdfe..1fc10493a6b1 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2331,8 +2331,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
 	 * or equal to the port's max
 	 */
 	if ((!fman->state->port_mfl[mac_id]) ||
-	    (fman->state->port_mfl[mac_id] &&
-	     (mfl <= fman->state->port_mfl[mac_id]))) {
+	    (mfl <= fman->state->port_mfl[mac_id])) {
 		fman->state->mac_mfl[mac_id] = mfl;
 	} else {
 		dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
--
cgit v1.2.3

From 0af46590d415ee672f9056c7cda5da63e02dbebf Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Tue, 12 Jul 2016 18:08:52 +0300
Subject: fsl/fman: fix return value checking

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 1fc10493a6b1..dafd9e1baba2 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -618,7 +618,7 @@ struct fman {
 	unsigned long cam_offset;
 	size_t cam_size;
 	/* Fifo in MURAM */
-	int fifo_offset;
+	unsigned long fifo_offset;
 	size_t fifo_size;
 
 	u32 liodn_base[64];
@@ -2036,7 +2036,7 @@ static int fman_init(struct fman *fman)
 	/* allocate MURAM for FIFO according to total size */
 	fman->fifo_offset = fman_muram_alloc(fman->muram,
					     fman->state->total_fifo_size);
-	if (IS_ERR_VALUE(fman->cam_offset)) {
+	if (IS_ERR_VALUE(fman->fifo_offset)) {
 		free_init_resources(fman);
 		dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
			__func__);
--
cgit v1.2.3

From 07d8aafb3ef833fbc3890b36bf2822ce47209621 Mon Sep 17 00:00:00 2001
From: Madalin Bucur
Date: Wed, 14 Sep 2016 15:08:43 +0300
Subject: fsl/fman: remove leftover comment

Signed-off-by: Madalin Bucur
---
 drivers/net/ethernet/freescale/fman/fman_mac.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index ddf0260176c9..dd6d0526f6c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -191,10 +191,6 @@ struct fman_mac_params {
 	u16 max_speed;
 	/* A handle to the FM object this port related to */
 	void *fm;
-	/* MDIO exceptions interrupt source - not valid for all
-	 * MACs; MUST be set to 0 for MACs that don't have
-	 * mdio-irq, or for polling
-	 */
 	void *dev_id; /* device cookie used by the exception cbs */
 	fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
 	fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
--
cgit v1.2.3

From 3254f83694fe519ac18b8334a2f481d80c3a8a3a Mon Sep 17 00:00:00 2001
From: Paul Durrant
Date: Tue, 4 Oct 2016 10:29:12 +0100
Subject: xen-netback: separate guest side rx code into separate module

The netback source module has become very large and somewhat confusing.
This patch simply moves all code related to the backend to frontend
(i.e guest side rx) data-path into a separate rx source module.

This patch contains no functional change, it is code movement and
minimal changes to avoid patch style-check issues.
Signed-off-by: Paul Durrant Signed-off-by: David S. Miller --- drivers/net/xen-netback/Makefile | 2 +- drivers/net/xen-netback/netback.c | 754 ------------------------------------ drivers/net/xen-netback/rx.c | 789 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 790 insertions(+), 755 deletions(-) create mode 100644 drivers/net/xen-netback/rx.c (limited to 'drivers/net') diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile index 11e02be9db1a..d49798a46b51 100644 --- a/drivers/net/xen-netback/Makefile +++ b/drivers/net/xen-netback/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o -xen-netback-y := netback.o xenbus.o interface.o hash.o +xen-netback-y := netback.o xenbus.o interface.o hash.o rx.o diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 3d0c989384b5..47b481095d77 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -106,13 +106,6 @@ static void push_tx_responses(struct xenvif_queue *queue); static inline int tx_work_todo(struct xenvif_queue *queue); -static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, - u16 id, - s8 st, - u16 offset, - u16 size, - u16 flags); - static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, u16 idx) { @@ -155,571 +148,11 @@ static inline pending_ring_idx_t pending_index(unsigned i) return i & (MAX_PENDING_REQS-1); } -static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) -{ - RING_IDX prod, cons; - struct sk_buff *skb; - int needed; - - skb = skb_peek(&queue->rx_queue); - if (!skb) - return false; - - needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); - if (skb_is_gso(skb)) - needed++; - if (skb->sw_hash) - needed++; - - do { - prod = queue->rx.sring->req_prod; - cons = queue->rx.req_cons; - - if (prod - cons >= needed) - return true; - - queue->rx.sring->req_event = prod + 1; - - /* Make sure event is visible before we check prod - * again. 
- */ - mb(); - } while (queue->rx.sring->req_prod != prod); - - return false; -} - -void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) -{ - unsigned long flags; - - spin_lock_irqsave(&queue->rx_queue.lock, flags); - - __skb_queue_tail(&queue->rx_queue, skb); - - queue->rx_queue_len += skb->len; - if (queue->rx_queue_len > queue->rx_queue_max) - netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); - - spin_unlock_irqrestore(&queue->rx_queue.lock, flags); -} - -static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) -{ - struct sk_buff *skb; - - spin_lock_irq(&queue->rx_queue.lock); - - skb = __skb_dequeue(&queue->rx_queue); - if (skb) - queue->rx_queue_len -= skb->len; - - spin_unlock_irq(&queue->rx_queue.lock); - - return skb; -} - -static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) -{ - spin_lock_irq(&queue->rx_queue.lock); - - if (queue->rx_queue_len < queue->rx_queue_max) - netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); - - spin_unlock_irq(&queue->rx_queue.lock); -} - - -static void xenvif_rx_queue_purge(struct xenvif_queue *queue) -{ - struct sk_buff *skb; - while ((skb = xenvif_rx_dequeue(queue)) != NULL) - kfree_skb(skb); -} - -static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) -{ - struct sk_buff *skb; - - for(;;) { - skb = skb_peek(&queue->rx_queue); - if (!skb) - break; - if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) - break; - xenvif_rx_dequeue(queue); - kfree_skb(skb); - } -} - -struct netrx_pending_operations { - unsigned copy_prod, copy_cons; - unsigned meta_prod, meta_cons; - struct gnttab_copy *copy; - struct xenvif_rx_meta *meta; - int copy_off; - grant_ref_t copy_gref; -}; - -static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, - struct netrx_pending_operations *npo) -{ - struct xenvif_rx_meta *meta; - struct xen_netif_rx_request req; - - RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); - - meta = npo->meta + npo->meta_prod++; - meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; - meta->gso_size = 0; - meta->size = 0; - meta->id = req.id; - - npo->copy_off = 0; - npo->copy_gref = req.gref; - - return meta; -} - -struct gop_frag_copy { - struct xenvif_queue *queue; - struct netrx_pending_operations *npo; - struct xenvif_rx_meta *meta; - int head; - int gso_type; - int protocol; - int hash_present; - - struct page *page; -}; - -static void xenvif_setup_copy_gop(unsigned long gfn, - unsigned int offset, - unsigned int *len, - struct gop_frag_copy *info) -{ - struct gnttab_copy *copy_gop; - struct xen_page_foreign *foreign; - /* Convenient aliases */ - struct xenvif_queue *queue = info->queue; - struct netrx_pending_operations *npo = info->npo; - struct page *page = info->page; - - BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); - - if (npo->copy_off == MAX_BUFFER_OFFSET) - info->meta = get_next_rx_buffer(queue, npo); - - if (npo->copy_off + *len > MAX_BUFFER_OFFSET) - *len = MAX_BUFFER_OFFSET - npo->copy_off; - - copy_gop = npo->copy + npo->copy_prod++; - copy_gop->flags = GNTCOPY_dest_gref; - copy_gop->len = *len; - - foreign = xen_page_foreign(page); - if (foreign) { - copy_gop->source.domid = foreign->domid; - copy_gop->source.u.ref = foreign->gref; - copy_gop->flags |= GNTCOPY_source_gref; - } else { - copy_gop->source.domid = DOMID_SELF; - copy_gop->source.u.gmfn = gfn; - } - copy_gop->source.offset = offset; - - copy_gop->dest.domid = queue->vif->domid; - copy_gop->dest.offset = npo->copy_off; - copy_gop->dest.u.ref = npo->copy_gref; 
- - npo->copy_off += *len; - info->meta->size += *len; - - if (!info->head) - return; - - /* Leave a gap for the GSO descriptor. */ - if ((1 << info->gso_type) & queue->vif->gso_mask) - queue->rx.req_cons++; - - /* Leave a gap for the hash extra segment. */ - if (info->hash_present) - queue->rx.req_cons++; - - info->head = 0; /* There must be something in this buffer now */ -} - -static void xenvif_gop_frag_copy_grant(unsigned long gfn, - unsigned offset, - unsigned int len, - void *data) -{ - unsigned int bytes; - - while (len) { - bytes = len; - xenvif_setup_copy_gop(gfn, offset, &bytes, data); - offset += bytes; - len -= bytes; - } -} - -/* - * Set up the grant operations for this fragment. If it's a flipping - * interface, we also set up the unmap request from here. - */ -static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, - struct netrx_pending_operations *npo, - struct page *page, unsigned long size, - unsigned long offset, int *head) -{ - struct gop_frag_copy info = { - .queue = queue, - .npo = npo, - .head = *head, - .gso_type = XEN_NETIF_GSO_TYPE_NONE, - /* xenvif_set_skb_hash() will have either set a s/w - * hash or cleared the hash depending on - * whether the the frontend wants a hash for this skb. - */ - .hash_present = skb->sw_hash, - }; - unsigned long bytes; - - if (skb_is_gso(skb)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) - info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - } - - /* Data must not cross a page boundary. */ - BUG_ON(size + offset > PAGE_SIZE<meta + npo->meta_prod - 1; - - /* Skip unused frames from start of page */ - page += offset >> PAGE_SHIFT; - offset &= ~PAGE_MASK; - - while (size > 0) { - BUG_ON(offset >= PAGE_SIZE); - - bytes = PAGE_SIZE - offset; - if (bytes > size) - bytes = size; - - info.page = page; - gnttab_foreach_grant_in_range(page, offset, bytes, - xenvif_gop_frag_copy_grant, - &info); - size -= bytes; - offset = 0; - - /* Next page */ - if (size) { - BUG_ON(!PageCompound(page)); - page++; - } - } - - *head = info.head; -} - -/* - * Prepare an SKB to be transmitted to the frontend. - * - * This function is responsible for allocating grant operations, meta - * structures, etc. - * - * It returns the number of meta structures consumed. The number of - * ring slots used is always equal to the number of meta slots used - * plus the number of GSO descriptors used. Currently, we use either - * zero GSO descriptors (for non-GSO packets) or one descriptor (for - * frontend-side LRO). 
- */ -static int xenvif_gop_skb(struct sk_buff *skb, - struct netrx_pending_operations *npo, - struct xenvif_queue *queue) -{ - struct xenvif *vif = netdev_priv(skb->dev); - int nr_frags = skb_shinfo(skb)->nr_frags; - int i; - struct xen_netif_rx_request req; - struct xenvif_rx_meta *meta; - unsigned char *data; - int head = 1; - int old_meta_prod; - int gso_type; - - old_meta_prod = npo->meta_prod; - - gso_type = XEN_NETIF_GSO_TYPE_NONE; - if (skb_is_gso(skb)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - } - - /* Set up a GSO prefix descriptor, if necessary */ - if ((1 << gso_type) & vif->gso_prefix_mask) { - RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); - meta = npo->meta + npo->meta_prod++; - meta->gso_type = gso_type; - meta->gso_size = skb_shinfo(skb)->gso_size; - meta->size = 0; - meta->id = req.id; - } - - RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); - meta = npo->meta + npo->meta_prod++; - - if ((1 << gso_type) & vif->gso_mask) { - meta->gso_type = gso_type; - meta->gso_size = skb_shinfo(skb)->gso_size; - } else { - meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; - meta->gso_size = 0; - } - - meta->size = 0; - meta->id = req.id; - npo->copy_off = 0; - npo->copy_gref = req.gref; - - data = skb->data; - while (data < skb_tail_pointer(skb)) { - unsigned int offset = offset_in_page(data); - unsigned int len = PAGE_SIZE - offset; - - if (data + len > skb_tail_pointer(skb)) - len = skb_tail_pointer(skb) - data; - - xenvif_gop_frag_copy(queue, skb, npo, - virt_to_page(data), len, offset, &head); - data += len; - } - - for (i = 0; i < nr_frags; i++) { - xenvif_gop_frag_copy(queue, skb, npo, - skb_frag_page(&skb_shinfo(skb)->frags[i]), - skb_frag_size(&skb_shinfo(skb)->frags[i]), - skb_shinfo(skb)->frags[i].page_offset, - &head); - } - - return npo->meta_prod - old_meta_prod; -} - -/* - * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was - * used to set up the operations on the top of - * netrx_pending_operations, which have since been done. Check that - * they didn't give any errors and advance over them. 
- */ -static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, - struct netrx_pending_operations *npo) -{ - struct gnttab_copy *copy_op; - int status = XEN_NETIF_RSP_OKAY; - int i; - - for (i = 0; i < nr_meta_slots; i++) { - copy_op = npo->copy + npo->copy_cons++; - if (copy_op->status != GNTST_okay) { - netdev_dbg(vif->dev, - "Bad status %d from copy to DOM%d.\n", - copy_op->status, vif->domid); - status = XEN_NETIF_RSP_ERROR; - } - } - - return status; -} - -static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, - struct xenvif_rx_meta *meta, - int nr_meta_slots) -{ - int i; - unsigned long offset; - - /* No fragments used */ - if (nr_meta_slots <= 1) - return; - - nr_meta_slots--; - - for (i = 0; i < nr_meta_slots; i++) { - int flags; - if (i == nr_meta_slots - 1) - flags = 0; - else - flags = XEN_NETRXF_more_data; - - offset = 0; - make_rx_response(queue, meta[i].id, status, offset, - meta[i].size, flags); - } -} - void xenvif_kick_thread(struct xenvif_queue *queue) { wake_up(&queue->wq); } -static void xenvif_rx_action(struct xenvif_queue *queue) -{ - struct xenvif *vif = queue->vif; - s8 status; - u16 flags; - struct xen_netif_rx_response *resp; - struct sk_buff_head rxq; - struct sk_buff *skb; - LIST_HEAD(notify); - int ret; - unsigned long offset; - bool need_to_notify = false; - - struct netrx_pending_operations npo = { - .copy = queue->grant_copy_op, - .meta = queue->meta, - }; - - skb_queue_head_init(&rxq); - - while (xenvif_rx_ring_slots_available(queue) - && (skb = xenvif_rx_dequeue(queue)) != NULL) { - queue->last_rx_time = jiffies; - - XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); - - __skb_queue_tail(&rxq, skb); - } - - BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); - - if (!npo.copy_prod) - goto done; - - BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); - gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); - - while ((skb = __skb_dequeue(&rxq)) != NULL) { - struct xen_netif_extra_info *extra = NULL; - - if ((1 << queue->meta[npo.meta_cons].gso_type) & - vif->gso_prefix_mask) { - resp = RING_GET_RESPONSE(&queue->rx, - queue->rx.rsp_prod_pvt++); - - resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; - - resp->offset = queue->meta[npo.meta_cons].gso_size; - resp->id = queue->meta[npo.meta_cons].id; - resp->status = XENVIF_RX_CB(skb)->meta_slots_used; - - npo.meta_cons++; - XENVIF_RX_CB(skb)->meta_slots_used--; - } - - - queue->stats.tx_bytes += skb->len; - queue->stats.tx_packets++; - - status = xenvif_check_gop(vif, - XENVIF_RX_CB(skb)->meta_slots_used, - &npo); - - if (XENVIF_RX_CB(skb)->meta_slots_used == 1) - flags = 0; - else - flags = XEN_NETRXF_more_data; - - if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ - flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated; - else if (skb->ip_summed == CHECKSUM_UNNECESSARY) - /* remote but checksummed. 
*/ - flags |= XEN_NETRXF_data_validated; - - offset = 0; - resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, - status, offset, - queue->meta[npo.meta_cons].size, - flags); - - if ((1 << queue->meta[npo.meta_cons].gso_type) & - vif->gso_mask) { - extra = (struct xen_netif_extra_info *) - RING_GET_RESPONSE(&queue->rx, - queue->rx.rsp_prod_pvt++); - - resp->flags |= XEN_NETRXF_extra_info; - - extra->u.gso.type = queue->meta[npo.meta_cons].gso_type; - extra->u.gso.size = queue->meta[npo.meta_cons].gso_size; - extra->u.gso.pad = 0; - extra->u.gso.features = 0; - - extra->type = XEN_NETIF_EXTRA_TYPE_GSO; - extra->flags = 0; - } - - if (skb->sw_hash) { - /* Since the skb got here via xenvif_select_queue() - * we know that the hash has been re-calculated - * according to a configuration set by the frontend - * and therefore we know that it is legitimate to - * pass it to the frontend. - */ - if (resp->flags & XEN_NETRXF_extra_info) - extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; - else - resp->flags |= XEN_NETRXF_extra_info; - - extra = (struct xen_netif_extra_info *) - RING_GET_RESPONSE(&queue->rx, - queue->rx.rsp_prod_pvt++); - - extra->u.hash.algorithm = - XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ; - - if (skb->l4_hash) - extra->u.hash.type = - skb->protocol == htons(ETH_P_IP) ? - _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP : - _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP; - else - extra->u.hash.type = - skb->protocol == htons(ETH_P_IP) ? - _XEN_NETIF_CTRL_HASH_TYPE_IPV4 : - _XEN_NETIF_CTRL_HASH_TYPE_IPV6; - - *(uint32_t *)extra->u.hash.value = - skb_get_hash_raw(skb); - - extra->type = XEN_NETIF_EXTRA_TYPE_HASH; - extra->flags = 0; - } - - xenvif_add_frag_responses(queue, status, - queue->meta + npo.meta_cons + 1, - XENVIF_RX_CB(skb)->meta_slots_used); - - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); - - need_to_notify |= !!ret; - - npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used; - dev_kfree_skb(skb); - } - -done: - if (need_to_notify) - notify_remote_via_irq(queue->rx_irq); -} - void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) { int more_to_do; @@ -1951,29 +1384,6 @@ static void push_tx_responses(struct xenvif_queue *queue) notify_remote_via_irq(queue->tx_irq); } -static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, - u16 id, - s8 st, - u16 offset, - u16 size, - u16 flags) -{ - RING_IDX i = queue->rx.rsp_prod_pvt; - struct xen_netif_rx_response *resp; - - resp = RING_GET_RESPONSE(&queue->rx, i); - resp->offset = offset; - resp->flags = flags; - resp->id = id; - resp->status = (s16)size; - if (st < 0) - resp->status = (s16)st; - - queue->rx.rsp_prod_pvt = ++i; - - return resp; -} - void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) { int ret; @@ -2055,170 +1465,6 @@ err: return err; } -static void xenvif_queue_carrier_off(struct xenvif_queue *queue) -{ - struct xenvif *vif = queue->vif; - - queue->stalled = true; - - /* At least one queue has stalled? Disable the carrier. */ - spin_lock(&vif->lock); - if (vif->stalled_queues++ == 0) { - netdev_info(vif->dev, "Guest Rx stalled"); - netif_carrier_off(vif->dev); - } - spin_unlock(&vif->lock); -} - -static void xenvif_queue_carrier_on(struct xenvif_queue *queue) -{ - struct xenvif *vif = queue->vif; - - queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ - queue->stalled = false; - - /* All queues are ready? Enable the carrier. 
*/ - spin_lock(&vif->lock); - if (--vif->stalled_queues == 0) { - netdev_info(vif->dev, "Guest Rx ready"); - netif_carrier_on(vif->dev); - } - spin_unlock(&vif->lock); -} - -static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) -{ - RING_IDX prod, cons; - - prod = queue->rx.sring->req_prod; - cons = queue->rx.req_cons; - - return !queue->stalled && prod - cons < 1 - && time_after(jiffies, - queue->last_rx_time + queue->vif->stall_timeout); -} - -static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) -{ - RING_IDX prod, cons; - - prod = queue->rx.sring->req_prod; - cons = queue->rx.req_cons; - - return queue->stalled && prod - cons >= 1; -} - -static bool xenvif_have_rx_work(struct xenvif_queue *queue) -{ - return xenvif_rx_ring_slots_available(queue) - || (queue->vif->stall_timeout && - (xenvif_rx_queue_stalled(queue) - || xenvif_rx_queue_ready(queue))) - || kthread_should_stop() - || queue->vif->disabled; -} - -static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) -{ - struct sk_buff *skb; - long timeout; - - skb = skb_peek(&queue->rx_queue); - if (!skb) - return MAX_SCHEDULE_TIMEOUT; - - timeout = XENVIF_RX_CB(skb)->expires - jiffies; - return timeout < 0 ? 0 : timeout; -} - -/* Wait until the guest Rx thread has work. - * - * The timeout needs to be adjusted based on the current head of the - * queue (and not just the head at the beginning). In particular, if - * the queue is initially empty an infinite timeout is used and this - * needs to be reduced when a skb is queued. - * - * This cannot be done with wait_event_timeout() because it only - * calculates the timeout once. - */ -static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) -{ - DEFINE_WAIT(wait); - - if (xenvif_have_rx_work(queue)) - return; - - for (;;) { - long ret; - - prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); - if (xenvif_have_rx_work(queue)) - break; - ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); - if (!ret) - break; - } - finish_wait(&queue->wq, &wait); -} - -int xenvif_kthread_guest_rx(void *data) -{ - struct xenvif_queue *queue = data; - struct xenvif *vif = queue->vif; - - if (!vif->stall_timeout) - xenvif_queue_carrier_on(queue); - - for (;;) { - xenvif_wait_for_rx_work(queue); - - if (kthread_should_stop()) - break; - - /* This frontend is found to be rogue, disable it in - * kthread context. Currently this is only set when - * netback finds out frontend sends malformed packet, - * but we cannot disable the interface in softirq - * context so we defer it here, if this thread is - * associated with queue 0. - */ - if (unlikely(vif->disabled && queue->id == 0)) { - xenvif_carrier_off(vif); - break; - } - - if (!skb_queue_empty(&queue->rx_queue)) - xenvif_rx_action(queue); - - /* If the guest hasn't provided any Rx slots for a - * while it's probably not responsive, drop the - * carrier so packets are dropped earlier. - */ - if (vif->stall_timeout) { - if (xenvif_rx_queue_stalled(queue)) - xenvif_queue_carrier_off(queue); - else if (xenvif_rx_queue_ready(queue)) - xenvif_queue_carrier_on(queue); - } - - /* Queued packets may have foreign pages from other - * domains. These cannot be queued indefinitely as - * this would starve guests of grant refs and transmit - * slots. 
- */ - xenvif_rx_queue_drop_expired(queue); - - xenvif_rx_queue_maybe_wake(queue); - - cond_resched(); - } - - /* Bin any remaining skbs */ - xenvif_rx_queue_purge(queue); - - return 0; -} - static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) { /* Dealloc thread must remain running until all inflight diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c new file mode 100644 index 000000000000..03836aaac1c2 --- /dev/null +++ b/drivers/net/xen-netback/rx.c @@ -0,0 +1,789 @@ +/* + * Copyright (c) 2016 Citrix Systems Inc. + * Copyright (c) 2002-2005, K A Fraser + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "common.h" + +#include + +#include +#include + +static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + struct sk_buff *skb; + int needed; + + skb = skb_peek(&queue->rx_queue); + if (!skb) + return false; + + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); + if (skb_is_gso(skb)) + needed++; + if (skb->sw_hash) + needed++; + + do { + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; + + if (prod - cons >= needed) + return true; + + queue->rx.sring->req_event = prod + 1; + + /* Make sure event is visible before we check prod + * again. 
+ */ + mb(); + } while (queue->rx.sring->req_prod != prod); + + return false; +} + +void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&queue->rx_queue.lock, flags); + + __skb_queue_tail(&queue->rx_queue, skb); + + queue->rx_queue_len += skb->len; + if (queue->rx_queue_len > queue->rx_queue_max) { + struct net_device *dev = queue->vif->dev; + + netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); + } + + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +} + +static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + spin_lock_irq(&queue->rx_queue.lock); + + skb = __skb_dequeue(&queue->rx_queue); + if (skb) + queue->rx_queue_len -= skb->len; + + spin_unlock_irq(&queue->rx_queue.lock); + + return skb; +} + +static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) +{ + spin_lock_irq(&queue->rx_queue.lock); + + if (queue->rx_queue_len < queue->rx_queue_max) { + struct net_device *dev = queue->vif->dev; + + netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); + } + + spin_unlock_irq(&queue->rx_queue.lock); +} + +static void xenvif_rx_queue_purge(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + while ((skb = xenvif_rx_dequeue(queue)) != NULL) + kfree_skb(skb); +} + +static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + + for (;;) { + skb = skb_peek(&queue->rx_queue); + if (!skb) + break; + if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) + break; + xenvif_rx_dequeue(queue); + kfree_skb(skb); + } +} + +struct netrx_pending_operations { + unsigned int copy_prod, copy_cons; + unsigned int meta_prod, meta_cons; + struct gnttab_copy *copy; + struct xenvif_rx_meta *meta; + int copy_off; + grant_ref_t copy_gref; +}; + +static struct xenvif_rx_meta *get_next_rx_buffer( + struct xenvif_queue *queue, + struct netrx_pending_operations *npo) +{ + struct xenvif_rx_meta *meta; + struct xen_netif_rx_request req; + + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); + + meta = npo->meta + npo->meta_prod++; + meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; + meta->gso_size = 0; + meta->size = 0; + meta->id = req.id; + + npo->copy_off = 0; + npo->copy_gref = req.gref; + + return meta; +} + +struct gop_frag_copy { + struct xenvif_queue *queue; + struct netrx_pending_operations *npo; + struct xenvif_rx_meta *meta; + int head; + int gso_type; + int protocol; + int hash_present; + + struct page *page; +}; + +static void xenvif_setup_copy_gop(unsigned long gfn, + unsigned int offset, + unsigned int *len, + struct gop_frag_copy *info) +{ + struct gnttab_copy *copy_gop; + struct xen_page_foreign *foreign; + /* Convenient aliases */ + struct xenvif_queue *queue = info->queue; + struct netrx_pending_operations *npo = info->npo; + struct page *page = info->page; + + WARN_ON(npo->copy_off > MAX_BUFFER_OFFSET); + + if (npo->copy_off == MAX_BUFFER_OFFSET) + info->meta = get_next_rx_buffer(queue, npo); + + if (npo->copy_off + *len > MAX_BUFFER_OFFSET) + *len = MAX_BUFFER_OFFSET - npo->copy_off; + + copy_gop = npo->copy + npo->copy_prod++; + copy_gop->flags = GNTCOPY_dest_gref; + copy_gop->len = *len; + + foreign = xen_page_foreign(page); + if (foreign) { + copy_gop->source.domid = foreign->domid; + copy_gop->source.u.ref = foreign->gref; + copy_gop->flags |= GNTCOPY_source_gref; + } else { + copy_gop->source.domid = DOMID_SELF; + copy_gop->source.u.gmfn = gfn; + } + copy_gop->source.offset = offset; + + copy_gop->dest.domid = 
queue->vif->domid; + copy_gop->dest.offset = npo->copy_off; + copy_gop->dest.u.ref = npo->copy_gref; + + npo->copy_off += *len; + info->meta->size += *len; + + if (!info->head) + return; + + /* Leave a gap for the GSO descriptor. */ + if ((1 << info->gso_type) & queue->vif->gso_mask) + queue->rx.req_cons++; + + /* Leave a gap for the hash extra segment. */ + if (info->hash_present) + queue->rx.req_cons++; + + info->head = 0; /* There must be something in this buffer now */ +} + +static void xenvif_gop_frag_copy_grant(unsigned long gfn, + unsigned int offset, + unsigned int len, + void *data) +{ + unsigned int bytes; + + while (len) { + bytes = len; + xenvif_setup_copy_gop(gfn, offset, &bytes, data); + offset += bytes; + len -= bytes; + } +} + +/* Set up the grant operations for this fragment. If it's a flipping + * interface, we also set up the unmap request from here. + */ +static void xenvif_gop_frag_copy(struct xenvif_queue *queue, + struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct page *page, unsigned long size, + unsigned long offset, int *head) +{ + struct gop_frag_copy info = { + .queue = queue, + .npo = npo, + .head = *head, + .gso_type = XEN_NETIF_GSO_TYPE_NONE, + /* xenvif_set_skb_hash() will have either set a s/w + * hash or cleared the hash depending on + * whether the the frontend wants a hash for this skb. + */ + .hash_present = skb->sw_hash, + }; + unsigned long bytes; + + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } + + /* Data must not cross a page boundary. */ + WARN_ON(size + offset > (PAGE_SIZE << compound_order(page))); + + info.meta = npo->meta + npo->meta_prod - 1; + + /* Skip unused frames from start of page */ + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + + while (size > 0) { + WARN_ON(offset >= PAGE_SIZE); + + bytes = PAGE_SIZE - offset; + if (bytes > size) + bytes = size; + + info.page = page; + gnttab_foreach_grant_in_range(page, offset, bytes, + xenvif_gop_frag_copy_grant, + &info); + size -= bytes; + offset = 0; + + /* Next page */ + if (size) { + WARN_ON(!PageCompound(page)); + page++; + } + } + + *head = info.head; +} + +/* Prepare an SKB to be transmitted to the frontend. + * + * This function is responsible for allocating grant operations, meta + * structures, etc. + * + * It returns the number of meta structures consumed. The number of + * ring slots used is always equal to the number of meta slots used + * plus the number of GSO descriptors used. Currently, we use either + * zero GSO descriptors (for non-GSO packets) or one descriptor (for + * frontend-side LRO). 
+ */ +static int xenvif_gop_skb(struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct xenvif_queue *queue) +{ + struct xenvif *vif = netdev_priv(skb->dev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + struct xen_netif_rx_request req; + struct xenvif_rx_meta *meta; + unsigned char *data; + int head = 1; + int old_meta_prod; + int gso_type; + + old_meta_prod = npo->meta_prod; + + gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } + + /* Set up a GSO prefix descriptor, if necessary */ + if ((1 << gso_type) & vif->gso_prefix_mask) { + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); + meta = npo->meta + npo->meta_prod++; + meta->gso_type = gso_type; + meta->gso_size = skb_shinfo(skb)->gso_size; + meta->size = 0; + meta->id = req.id; + } + + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); + meta = npo->meta + npo->meta_prod++; + + if ((1 << gso_type) & vif->gso_mask) { + meta->gso_type = gso_type; + meta->gso_size = skb_shinfo(skb)->gso_size; + } else { + meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; + meta->gso_size = 0; + } + + meta->size = 0; + meta->id = req.id; + npo->copy_off = 0; + npo->copy_gref = req.gref; + + data = skb->data; + while (data < skb_tail_pointer(skb)) { + unsigned int offset = offset_in_page(data); + unsigned int len = PAGE_SIZE - offset; + + if (data + len > skb_tail_pointer(skb)) + len = skb_tail_pointer(skb) - data; + + xenvif_gop_frag_copy(queue, skb, npo, + virt_to_page(data), len, offset, &head); + data += len; + } + + for (i = 0; i < nr_frags; i++) { + xenvif_gop_frag_copy(queue, skb, npo, + skb_frag_page(&skb_shinfo(skb)->frags[i]), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + skb_shinfo(skb)->frags[i].page_offset, + &head); + } + + return npo->meta_prod - old_meta_prod; +} + +/* This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was + * used to set up the operations on the top of + * netrx_pending_operations, which have since been done. Check that + * they didn't give any errors and advance over them. + */ +static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, + struct netrx_pending_operations *npo) +{ + struct gnttab_copy *copy_op; + int status = XEN_NETIF_RSP_OKAY; + int i; + + for (i = 0; i < nr_meta_slots; i++) { + copy_op = npo->copy + npo->copy_cons++; + if (copy_op->status != GNTST_okay) { + netdev_dbg(vif->dev, + "Bad status %d from copy to DOM%d.\n", + copy_op->status, vif->domid); + status = XEN_NETIF_RSP_ERROR; + } + } + + return status; +} + +static struct xen_netif_rx_response *make_rx_response( + struct xenvif_queue *queue, u16 id, s8 st, u16 offset, u16 size, + u16 flags) +{ + RING_IDX i = queue->rx.rsp_prod_pvt; + struct xen_netif_rx_response *resp; + + resp = RING_GET_RESPONSE(&queue->rx, i); + resp->offset = offset; + resp->flags = flags; + resp->id = id; + resp->status = (s16)size; + if (st < 0) + resp->status = (s16)st; + + queue->rx.rsp_prod_pvt = ++i; + + return resp; +} + +static void xenvif_add_frag_responses(struct xenvif_queue *queue, + int status, + struct xenvif_rx_meta *meta, + int nr_meta_slots) +{ + int i; + unsigned long offset; + + /* No fragments used */ + if (nr_meta_slots <= 1) + return; + + nr_meta_slots--; + + for (i = 0; i < nr_meta_slots; i++) { + int flags; + + if (i == nr_meta_slots - 1) + flags = 0; + else + flags = XEN_NETRXF_more_data; + + off