path: root/drivers/net/ethernet/chelsio
author     Jakub Kicinski <kuba@kernel.org>   2020-11-12 16:54:48 -0800
committer  Jakub Kicinski <kuba@kernel.org>   2020-11-12 16:54:48 -0800
commit     e1d9d7b91302593d1951fcb12feddda6fb58a3c0 (patch)
tree       25662dfcceb84414d66f6c90714883b1542c0ea5 /drivers/net/ethernet/chelsio
parent     e865802357086b36632acf3e629f726f089a6769 (diff)
parent     db7c953555388571a96ed8783ff6c5745ba18ab9 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h                        3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c                2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c                   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h                    6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c                        111
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c  582
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h    1
7 files changed, 478 insertions, 228 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3352dad6ca99..27308600da15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2124,6 +2124,9 @@ void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 send_len);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c24d34a937c8..7d49fd4edc9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3573,6 +3573,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "TX sw fallback : %20llu\n",
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
while (i < MAX_NPORTS) {
ktls_port = &adap->ch_ktls_stats.ktls_port[i];
seq_printf(seq, "Port %d\n", i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a952fe198eb9..7fd264a6d085 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1176,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
+ cxgb4_is_ktls_skb(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b169776ab484..1b49f2fa9b18 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -388,6 +388,7 @@ struct ch_ktls_stats_debug {
atomic64_t ktls_tx_retransmit_pkts;
atomic64_t ktls_tx_complete_pkts;
atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_fallback;
};
#endif
@@ -493,6 +494,11 @@ struct cxgb4_uld_info {
#endif
};
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a9e9c7ae565d..196652a114c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -890,6 +890,114 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
}
EXPORT_SYMBOL(cxgb4_write_sgl);
+/* cxgb4_write_partial_sgl - populate SGL for partial packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ * @start: start offset in the SKB where partial data starts
+ * @len: length of data from @start to send out
+ *
+ * This API will handle sending out partial data of a skb if required.
+ * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
+ * and @len will decide how much data after @start offset to send out.
+ */
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 len)
+{
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u8 i = 0, frag_idx = 0, nfrags = 0;
+ skb_frag_t *frag;
+
+ /* Fill the first SGL either from linear data or from partial
+ * frag based on @start.
+ */
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ sgl->len0 = htonl(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ len -= frag_size;
+ nfrags++;
+ } else {
+ start -= skb_linear_data_len;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ /* find the first frag */
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+
+ frag_size = min(len, skb_frag_size(frag) - start);
+ sgl->len0 = cpu_to_be32(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+
+ /* If the entire partial data fit in one SGL, then send it out
+ * now.
+ */
+ if (!len)
+ goto done;
+
+ /* Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ /* If the skb couldn't fit in first SGL completely, fill the
+ * rest of the frags in subsequent SGLs. Note that each SGL
+ * pair can store 2 frags.
+ */
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ to->len[i & 1] = cpu_to_be32(frag_size);
+ to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
+ if (i && (i & 1))
+ to++;
+ nfrags++;
+ frag_idx++;
+ i++;
+ len -= frag_size;
+ }
+
+ /* If we ended in an odd boundary, then set the second SGL's
+ * length in the pair to 0.
+ */
+ if (i & 1)
+ to->len[1] = cpu_to_be32(0);
+
+ /* Copy from temporary buffer to Tx ring, in case we hit the
+ * end of the queue in the middle of writing the SGL.
+ */
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+
+ /* 0-pad to multiple of 16 */
+ if ((uintptr_t)end & 8)
+ *end = 0;
+done:
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
+}
+EXPORT_SYMBOL(cxgb4_write_partial_sgl);
+
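The wrap-around copy is the subtle part of the new cxgb4_write_partial_sgl(): when the SGL would run past q->stat (the end of the hardware ring), it is first built in an on-stack buffer and then copied out in two pieces, part0 bytes up to the end of the ring and part1 bytes starting again at q->desc. A minimal standalone sketch of that split copy follows; the fixed-size byte array, write_wrapped() and RING_BYTES are illustrative stand-ins, not driver code.

#include <stdio.h>
#include <string.h>

/* Toy model of the end-of-ring handling in cxgb4_write_partial_sgl():
 * an SGL that would run past the end of the ring is built in a
 * temporary buffer and copied out in two pieces. */
#define RING_BYTES 64

static unsigned char ring[RING_BYTES];

static void write_wrapped(unsigned int pos, const unsigned char *sgl,
			  unsigned int sgl_len)
{
	if (pos + sgl_len > RING_BYTES) {
		unsigned int part0 = RING_BYTES - pos;	/* up to end of ring */
		unsigned int part1 = sgl_len - part0;	/* wrapped remainder */

		memcpy(&ring[pos], sgl, part0);
		memcpy(&ring[0], sgl + part0, part1);
	} else {
		memcpy(&ring[pos], sgl, sgl_len);
	}
}

int main(void)
{
	unsigned char sgl[24];

	memset(sgl, 0xab, sizeof(sgl));
	write_wrapped(52, sgl, sizeof(sgl));	/* 12 bytes at the tail, 12 wrapped */
	printf("ring[52]=%#x ring[11]=%#x\n", ring[52], ring[11]);
	return 0;
}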
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
* data from the FIFO instead of from Host.
@@ -1422,7 +1530,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#endif /* CHELSIO_IPSEC_INLINE */
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
- if (skb->decrypted)
+ if (cxgb4_is_ktls_skb(skb) &&
+ (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
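The reworked gate above diverts an skb to the kTLS ULD handler only if its socket is TLS TX-device-offloaded and the segment carries payload beyond the TCP header; bare ACKs and header-only segments stay on the regular cxgb4_eth_xmit() path. A hedged arithmetic sketch of that length check follows; has_tls_payload() and the literal header sizes are illustrative, while the real check uses skb_transport_offset() and tcp_hdrlen().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the check in cxgb4_eth_xmit():
 * skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)) > 0 */
static bool has_tls_payload(unsigned int skb_len, unsigned int transport_off,
			    unsigned int tcp_hdr_len)
{
	return skb_len > transport_off + tcp_hdr_len;
}

int main(void)
{
	/* 14B Ethernet + 20B IPv4 -> transport offset 34;
	 * 32B TCP header including timestamps */
	printf("1448B payload segment -> %d\n",
	       has_tls_payload(34 + 32 + 1448, 34, 32));	/* 1: ktls handler */
	printf("bare ACK              -> %d\n",
	       has_tls_payload(34 + 32, 34, 32));		/* 0: normal xmit */
	return 0;
}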
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 5195f692f14d..c24485c0d512 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -14,6 +14,50 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+/* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
+ * @skb: skb
+ * @start: start offset.
+ * @len: how much data to send after @start
+ */
+static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+{
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ u8 nfrags = 0, frag_idx = 0;
+ skb_frag_t *frag;
+
+ /* if its a linear skb then return 1 */
+ if (!skb_is_nonlinear(skb))
+ return 1;
+
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ start = 0;
+ } else {
+ start -= skb_linear_data_len;
+
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+ frag_size = min(len, skb_frag_size(frag) - start);
+ }
+ len -= frag_size;
+ nfrags++;
+
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+ return nfrags;
+}
+
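chcr_get_nfrags_to_send() sizes the work request before it is written: it counts how many SGL entries a (start, len) window of the skb will occupy once the fragment containing @start has been found. A simplified, self-contained model of that count is below; headlen and sizes[] stand in for skb_headlen() and the skb_frag_size() values and are not driver API.

#include <stdio.h>

/* Simplified model of chcr_get_nfrags_to_send(): count how many SGL
 * entries a (start, len) window of an skb will need. */
static int nfrags_to_send(unsigned int headlen, const unsigned int *sizes,
			  int nr_frags, unsigned int start, unsigned int len)
{
	unsigned int chunk;
	int idx = 0, nfrags = 0;

	if (start < headlen) {
		chunk = len < headlen - start ? len : headlen - start;
	} else {
		start -= headlen;
		while (start >= sizes[idx])	/* skip frags before start */
			start -= sizes[idx++];
		chunk = len < sizes[idx] - start ? len : sizes[idx] - start;
		idx++;
	}
	len -= chunk;
	nfrags++;

	while (len) {				/* remaining frags */
		chunk = len < sizes[idx] ? len : sizes[idx];
		len -= chunk;
		nfrags++;
		idx++;
	}
	return nfrags;
}

int main(void)
{
	unsigned int sizes[] = { 4096, 4096, 4096, 1024 };

	/* a 9000-byte window starting 500 bytes into the first page frag of
	 * an skb with 66 bytes of linear header data needs three SGL entries */
	printf("%d\n", nfrags_to_send(66, sizes, 4, 66 + 500, 9000));
	return 0;
}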
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
@@ -689,7 +733,8 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
}
static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
- u32 tid, void *pos, u16 word, u64 mask,
+ u32 tid, void *pos, u16 word,
+ struct sge_eth_txq *q, u64 mask,
u64 val, u32 reply)
{
struct cpl_set_tcb_field_core *cpl;
@@ -698,7 +743,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
/* ULP_TXPKT */
txpkt = pos;
- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
/* ULPTX_IDATA sub-command */
@@ -753,7 +801,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
} else {
u8 buf[48] = {0};
- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
mask, val, reply);
return chcr_copy_to_txd(buf, &q->q, pos,
@@ -761,7 +809,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
}
}
- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
mask, val, reply);
/* check again if we are at the end of the queue */
@@ -783,11 +831,11 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
*/
static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u64 tcp_seq,
- u64 tcp_ack, u64 tcp_win)
+ u64 tcp_ack, u64 tcp_win, bool offset)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
struct ch_ktls_port_stats_debug *port_stats;
- u32 len, cpl = 0, ndesc, wr_len;
+ u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
struct fw_ulptx_wr *wr;
int credits;
void *pos;
@@ -803,6 +851,11 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
/* make space for WR, we'll fill it later when we know all the cpls
* being sent out and have complete length.
@@ -818,7 +871,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
cpl++;
}
/* reset snd una if it's a re-transmit pkt */
- if (tcp_seq != tx_info->prev_seq) {
+ if (tcp_seq != tx_info->prev_seq || offset) {
/* reset snd_una */
port_stats =
&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -827,7 +880,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&port_stats->ktls_tx_ooo);
+ if (tcp_seq != tx_info->prev_seq)
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
@@ -856,7 +910,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+ wr->flowid_len16 = htonl(wr_mid |
+ FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
ndesc = DIV_ROUND_UP(len, 64);
chcr_txq_advance(&q->q, ndesc);
@@ -866,34 +921,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
}
/*
- * chcr_ktls_skb_copy
- * @nskb - new skb where the frags to be added.
- * @skb - old skb from which frags will be copied.
- */
-static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
- __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
- }
-
- skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
- nskb->len += skb->data_len;
- nskb->data_len = skb->data_len;
- nskb->truesize += skb->data_len;
-}
-
-/*
* chcr_ktls_get_tx_flits
* returns number of flits to be sent out, it includes key context length, WR
* size and skb fragments.
*/
static unsigned int
-chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
{
- return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ return chcr_sgl_len(nr_frags) +
DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
}
@@ -957,8 +992,10 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct tcphdr *tcp;
int len16, pktlen;
struct iphdr *ip;
+ u32 wr_mid = 0;
int credits;
u8 buf[150];
+ u64 cntrl1;
void *pos;
iplen = skb_network_header_len(skb);
@@ -967,7 +1004,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb->len - skb->data_len;
+ pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -980,6 +1017,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
wr = pos;
@@ -987,7 +1029,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(ctrl));
- wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
wr->r3 = 0;
cpl = (void *)(wr + 1);
@@ -997,22 +1039,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
TXPKT_PF_V(tx_info->adap->pf));
cpl->pack = 0;
cpl->len = htons(pktlen);
- /* checksum offload */
- cpl->ctrl1 = 0;
-
- pos = cpl + 1;
memcpy(buf, skb->data, pktlen);
if (tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
#endif
}
+
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+
+ pos = cpl + 1;
+
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
* need to update the tcp sequence number as per the last packet.
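With this change chcr_ktls_write_tcp_options() emits a header-only packet: pktlen now covers just the Ethernet, IP and TCP headers (skb_transport_offset() + tcp_hdrlen()), the IP length fields are rewritten to match the trimmed packet, and ctrl1 enables TCP checksum offload for it. A small worked example of those lengths, using typical header sizes rather than values taken from the patch:

#include <stdio.h>

/* Worked example of the header-only packet emitted by
 * chcr_ktls_write_tcp_options(); sizes are typical, not from a capture. */
int main(void)
{
	unsigned int maclen = 14;	/* Ethernet header           */
	unsigned int iplen  = 20;	/* IPv4 header, no options   */
	unsigned int tcplen = 32;	/* TCP header + timestamps   */
	unsigned int pktlen = maclen + iplen + tcplen;

	printf("pktlen           = %u\n", pktlen);		    /* 66 */
	printf("ip->tot_len      = %u\n", pktlen - maclen);	    /* 52 */
	printf("ipv6 payload_len = %u\n", pktlen - maclen - iplen); /* 32 */
	return 0;
}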
@@ -1031,71 +1079,6 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return 0;
}
-/* chcr_ktls_skb_shift - Shifts request length paged data from skb to another.
- * @tgt- buffer into which tail data gets added
- * @skb- buffer from which the paged data comes from
- * @shiftlen- shift up to this many bytes
- */
-static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen)
-{
- skb_frag_t *fragfrom, *fragto;
- int from, to, todo;
-
- WARN_ON(shiftlen > skb->data_len);
-
- todo = shiftlen;
- from = 0;
- to = 0;
- fragfrom = &skb_shinfo(skb)->frags[from];
-
- while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
- fragfrom = &skb_shinfo(skb)->frags[from];
- fragto = &skb_shinfo(tgt)->frags[to];
-
- if (todo >= skb_frag_size(fragfrom)) {
- *fragto = *fragfrom;
- todo -= skb_frag_size(fragfrom);
- from++;
- to++;
-
- } else {
- __skb_frag_ref(fragfrom);
- skb_frag_page_copy(fragto, fragfrom);
- skb_frag_off_copy(fragto, fragfrom);
- skb_frag_size_set(fragto, todo);
-
- skb_frag_off_add(fragfrom, todo);
- skb_frag_size_sub(fragfrom, todo);
- todo = 0;
-
- to++;
- break;
- }
- }
-
- /* Ready to "commit" this state change to tgt */
- skb_shinfo(tgt)->nr_frags = to;
-
- /* Reposition in the original skb */
- to = 0;
- while (from < skb_shinfo(skb)->nr_frags)
- skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
-
- skb_shinfo(skb)->nr_frags = to;
-
- WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
-
- skb->len -= shiftlen;
- skb->data_len -= shiftlen;
- skb->truesize -= shiftlen;
- tgt->len += shiftlen;
- tgt->data_len += shiftlen;
- tgt->truesize += shiftlen;
-
- return shiftlen;
-}
-
/*
* chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
* received has partial end part of the record, send out the complete record, so
@@ -1111,6 +1094,8 @@ static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 tcp_seq,
+ bool is_last_wr, u32 data_len,
+ u32 skb_offset, u32 nfrags,
bool tcp_push, u32 mss)
{
u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
@@ -1126,7 +1111,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
u64 *end;
/* get the number of flits required */
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
/* number of descriptors */
ndesc = chcr_flits_to_desc(flits);
/* check if enough credits available */
@@ -1155,6 +1140,9 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (!is_last_wr)
+ skb_get(skb);
+
pos = &q->q.desc[q->q.pidx];
end = (u64 *)pos + flits;
/* FW_ULPTX_WR */
@@ -1187,7 +1175,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
- cpl->pldlen = htonl(skb->data_len);
+ cpl->pldlen = htonl(data_len);
/* encryption should start after tls header size + iv size */
cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
@@ -1229,7 +1217,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
/* CPL_TX_DATA */
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
- tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
tx_data->rsvd = htonl(tcp_seq);
@@ -1249,8 +1237,8 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
}
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1282,10 +1270,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
struct sge_eth_txq *q,
u32 tcp_seq, bool tcp_push, u32 mss,
u32 tls_rec_offset, u8 *prior_data,
- u32 prior_data_len)
+ u32 prior_data_len, u32 data_len,
+ u32 skb_offset)
{
+ u32 len16, wr_mid = 0, cipher_start, nfrags;
struct adapter *adap = tx_info->adap;
- u32 len16, wr_mid = 0, cipher_start;
unsigned int flits = 0, ndesc;
int credits, left, last_desc;
struct tx_sw_desc *sgl_sdesc;
@@ -1298,10 +1287,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
void *pos;
u64 *end;
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
/* get the number of flits required, it's a partial record so 2 flits
* (AES_BLOCK_SIZE) will be added.
*/
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
/* get the correct 8 byte IV of this record */
iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
/* If it's a middle record and not 16 byte aligned to run AES CTR, need
@@ -1373,7 +1363,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
- cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
cpl->aadstart_cipherstop_hi =
htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
cpl->cipherstop_lo_authinsert = 0;
@@ -1404,7 +1394,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
if (tcp_push)
@@ -1437,8 +1427,8 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
if (prior_data_len)
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1466,6 +1456,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct sk_buff *skb, u32 tcp_seq, u32 mss,
bool tcp_push, struct sge_eth_txq *q,
u32 port_id, u8 *prior_data,
+ u32 data_len, u32 skb_offset,
u32 prior_data_len)
{
int credits, left, len16, last_desc;
@@ -1475,14 +1466,16 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct ulptx_idata *idata;
struct ulp_txpkt *ulptx;
struct fw_ulptx_wr *wr;
- u32 wr_mid = 0;
+ u32 wr_mid = 0, nfrags;
void *pos;
u64 *end;
flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
- flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
+ flits += chcr_sgl_len(nfrags);
if (prior_data_len)
flits += 2;
+
/* WR will need len16 */
len16 = DIV_ROUND_UP(flits, 2);
/* check how many descriptors needed */
@@ -1535,7 +1528,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
tx_data = (struct cpl_tx_data *)(idata + 1);
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
/* set tcp seq number */
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
@@ -1559,8 +1552,8 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
end = pos + left;
}
/* send the complete packet including the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1568,12 +1561,96 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
return 0;
}
+static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct sge_eth_txq *q)
+{
+ u32 ctrl, iplen, maclen, wr_mid = 0, len16;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ unsigned int flits, ndesc;
+ int credits, last_desc;
+ u64 cntrl1, *end;
+ void *pos;
+
+ ctrl = sizeof(*cpl);
+ flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
+
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return -ENOMEM;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return -ENOMEM;
+ }
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
+ TXPKT_INTF_V(tx_info->tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
+ TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+ cpl->len = htons(skb->len);
+
+ pos = cpl + 1;
+
+ cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
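chcr_ktls_tunnel_pkt() sizes its request the usual cxgb4 way: the immediate WR + CPL data is converted to 8-byte flits, the SGL flits for the linear part plus every fragment are added, and the result is rounded up to 64-byte descriptors; the queue is stopped early once fewer than ETHTXQ_STOP_THRES credits remain. A sketch of that arithmetic is below; sgl_len() is assumed to match cxgb4's usual SGL formula and the 16-byte header sizes are illustrative.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* cxgb4's SGL size formula (flits needed for n address/length entries);
 * chcr_sgl_len() in the driver is assumed to match this. */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	unsigned int wr_plus_cpl = 16 + 16;	/* illustrative sizes of the
						 * fw_eth_tx_pkt_wr and
						 * cpl_tx_pkt_core headers */
	unsigned int nr_frags = 3;

	/* flits: immediate WR/CPL data in 8-byte units plus the SGL
	 * covering the linear part (+1) and every fragment */
	unsigned int flits = DIV_ROUND_UP(wr_plus_cpl, 8) +
			     sgl_len(nr_frags + 1);
	unsigned int ndesc = DIV_ROUND_UP(flits, 8);	/* 64B descriptors */

	printf("flits=%u ndesc=%u\n", flits, ndesc);
	return 0;
}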
/*
* chcr_ktls_copy_record_in_skb
* @nskb - new skb where the frags to be added.
+ * @skb - old skb, to copy socket and destructor details.
* @record - specific record which has complete 16k record in frags.
*/
static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct sk_buff *skb,
struct tls_record_info *record)
{
int i = 0;
@@ -1588,6 +1665,9 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
nskb->data_len = record->len;
nskb->len += record->len;
nskb->truesize += record->len;
+ nskb->sk = skb->sk;
+ nskb->destructor = skb->destructor;
+ refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}
/*
@@ -1659,7 +1739,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
- struct sge_eth_txq *q,
+ struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
struct sk_buff *nskb = NULL;
@@ -1668,30 +1748,37 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
nskb = skb;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
} else {
- dev_kfree_skb_any(skb);
-
- nskb = alloc_skb(0, GFP_KERNEL);
- if (!nskb)
+ nskb = alloc_skb(0, GFP_ATOMIC);
+ if (!nskb) {
+ dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
+ }
+
/* copy complete record in skb */
- chcr_ktls_copy_record_in_skb(nskb, record);
+ chcr_ktls_copy_record_in_skb(nskb, skb, record);
/* packet is being sent from the beginning, update the tcp_seq
* accordingly.
*/
tcp_seq = tls_record_start_seq(record);
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
+ /* reset skb offset */
+ skb_offset = 0;
+
+ if (last_wr)
+ dev_kfree_skb_any(skb);
+
+ last_wr = true;
+
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
}
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ last_wr, record->len, skb_offset,
+ record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
goto out;
}
+ tx_info->prev_seq = record->end_seq;
return 0;
out:
dev_kfree_skb_any(nskb);
@@ -1723,41 +1810,47 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ u32 data_len, u32 skb_offset,
struct sge_eth_txq *q, u32 tls_end_offset)
{
u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
u8 prior_data[16] = {0};
u32 prior_data_len = 0;
- u32 data_len;
/* check if the skb is ending in middle of tag/HASH, its a big
* trouble, send the packet before the HASH.
*/
- int remaining_record = tls_end_offset - skb->data_len;
+ int remaining_record = tls_end_offset - data_len;
if (remaining_record > 0 &&
remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
- int trimmed_len = skb->data_len -
- (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
- struct sk_buff *tmp_skb = NULL;
- /* don't process the pkt if it is only a partial tag */
- if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
- goto out;
+ int trimmed_len = 0;
- WARN_ON(trimmed_len > skb->data_len);
+ if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ trimmed_len = data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
+ remaining_record);
+ if (!trimmed_len)
+ return FALLBACK;
- /* shift to those many bytes */
- tmp_skb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!tmp_skb))
- goto out;
+ WARN_ON(trimmed_len > data_len);
- chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
- /* free the last trimmed portion */
- dev_kfree_skb_any(skb);
- skb = tmp_skb;
+ data_len = trimmed_len;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
}
- data_len = skb->data_len;
+
+ /* check if it is only the header part. */
+ if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id, prior_data,
+ data_len, skb_offset, prior_data_len))
+ goto out;
+
+ tx_info->prev_seq = tcp_seq + data_len;
+ return 0;
+ }
+
/* check if the middle record's start point is 16 byte aligned. CTR
* needs 16 byte aligned start point to start encryption.
*/
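This alignment requirement is why the short-record path may prepend prior_data: if the transmit offset into the record's ciphertext is not a multiple of the AES block size, up to 15 already-sent bytes are fetched again so AES-CTR can restart on a 16-byte boundary, and tcp_seq is rewound by the same amount (visible in the following hunk). A hedged sketch of the arithmetic; the exact base offset the driver aligns against (TLS header plus IV) is not shown in this hunk.

#include <stdio.h>

#define AES_BLOCK_LEN	16

/* Hedged model of the prior_data idea: back up to the previous 16-byte
 * boundary of the record's ciphertext and resend those bytes so AES-CTR
 * starts on a block boundary. */
int main(void)
{
	unsigned int cipher_offset = 1000;	/* offset into the ciphertext */
	unsigned int prior_data_len = cipher_offset % AES_BLOCK_LEN;	/* 8 */
	unsigned int aligned_offset = cipher_offset - prior_data_len;	/* 992 */

	printf("resend %u bytes, start encryption at offset %u\n",
	       prior_data_len, aligned_offset);
	/* the driver then also rewinds tcp_seq by prior_data_len */
	return 0;
}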
@@ -1818,9 +1911,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
- /* include prio_data_len for further calculation.
- */
- data_len += prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
@@ -1829,37 +1919,54 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
goto out;
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
- /* Else means, its a partial first part of the record. Check if
- * its only the header, don't need to send for encryption then.
- */
- if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
- if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
- tcp_push_no_fin, q,
- tx_info->port_id,
- prior_data,
- prior_data_len)) {
- goto out;
- }
- return 0;
- }
atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
}
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
mss, tls_rec_offset, prior_data,
- prior_data_len)) {
+ prior_data_len, data_len, skb_offset)) {
goto out;
}
+ tx_info->prev_se