// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

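/* Return the CQE at the current head of the completion queue, or NULL if
 * there is no valid entry; the head index wraps at cqe_cnt.
 */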
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

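/* Translate a fragment index for hardware SG entries: on big-endian the
 * position is reversed within each aligned group of four, on little-endian
 * the index is used as-is.
 */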
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

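/* DMA map a single skb segment for transmit: segment 0 is the linear
 * skb->data area, segments 1..N are the paged fragments.
 */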
static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

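/* Undo the DMA mappings recorded for every segment of a transmitted skb */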
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

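/* Process one TX completion: log hardware errors, update the packet/byte
 * counters, unmap the skb's buffers and free the skb.
 */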
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sk_buff *skb = NULL;
	struct sg_list *sg;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

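/* Attach a received buffer to the skb as a page fragment and release its
 * DMA mapping.
 */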
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len)
{
	struct page *page;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
	page = virt_to_page(va);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			va - page_address(page), len, pfvf->rbsize);

	otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
			    pfvf->rbsize, DMA_FROM_DEVICE);
}

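/* Propagate the hardware RSS flow tag into the skb hash when NETIF_F_RXHASH
 * is enabled; the hash type depends on the configured flow key.
 */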
static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

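/* Free the receive buffers described by a CQE's scatter/gather list */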
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;