summaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
blob: 92d06cca0266e80c4a50d66a799b75f0ae29fcdb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
/*
 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt76x0.h"
#include "trace.h"
#include "../mt76x02_util.h"

/* Take mac80211 Q id from the skb and translate it to hardware Q id */
static u8 skb2q(struct sk_buff *skb)
{
	int mac_qid = skb_get_queue_mapping(skb);

	/* Only the four AC queues have a hardware mapping; anything at or
	 * above MT_TXQ_PSD is unexpected here, so force it onto best effort
	 * and fix up the skb's mapping to match.
	 */
	if (WARN_ON(mac_qid >= MT_TXQ_PSD)) {
		mac_qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, mac_qid);
	}

	return mt76_ac_to_hwq(mac_qid);
}

/* Undo the framing added for DMA transmit: strip the TXWI descriptor and
 * DMA header, drop any header alignment pad, and restore the original
 * frame length that mt76x0_tx() stashed in the skb's driver data.
 */
static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
					       struct ieee80211_tx_info *info)
{
	int orig_len = (unsigned long)info->status.status_driver_data[0];

	/* TXWI descriptor plus the 4-byte DMA header sit in front. */
	skb_pull(skb, sizeof(struct mt76x02_txwi) + 4);

	/* Headers whose length is not a multiple of 4 were padded on the
	 * way out; remove that pad again.
	 */
	if (ieee80211_get_hdrlen_from_skb(skb) % 4)
		mt76x02_remove_hdr_pad(skb, 2);

	skb_trim(skb, orig_len);
}

/* Report a completed frame back to mac80211.  No per-frame rate feedback
 * is filled in here: the first rate index is cleared and the frame is
 * reported as acked unconditionally.
 */
void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->mt76.hw;

	/* Peel off the TXWI/DMA framing before mac80211 sees the skb. */
	mt76x0_tx_skb_remove_dma_overhead(skb, info);

	ieee80211_tx_info_clear_status(info);
	info->status.rates[0].idx = -1;
	info->flags |= IEEE80211_TX_STAT_ACK;

	spin_lock(&dev->mac_lock);
	ieee80211_tx_status(hw, skb);
	spin_unlock(&dev->mac_lock);
}

/* Prepend and fill a hardware TX descriptor (TXWI) on the skb.
 * @pkt_len is the payload length before any driver-added overhead; it is
 * written into the descriptor's length field.  Returns a pointer to the
 * TXWI, which now lives at the head of the skb.
 */
static struct mt76x02_txwi *
mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
		  struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		  int pkt_len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct mt76x02_txwi *txwi;
	unsigned long flags;
	u16 txwi_flags = 0;
	u32 pkt_id;
	u16 rate_ctl;
	u8 nss;

	txwi = (struct mt76x02_txwi *)skb_push(skb, sizeof(struct mt76x02_txwi));
	memset(txwi, 0, sizeof(*txwi));

	/* If no fixed rate has been programmed for this wcid, ask mac80211's
	 * rate control to fill in info->control.rates.
	 */
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	/* The cached wcid rate fields are protected by dev->mt76.lock. */
	spin_lock_irqsave(&dev->mt76.lock, flags);
	if (rate->idx < 0 || !rate->count) {
		/* Rate control gave nothing usable; fall back to the cached
		 * per-wcid rate.
		 */
		rate_ctl = wcid->tx_rate;
		nss = wcid->tx_rate_nss;
	} else {
		rate_ctl = mt76x02_mac_tx_rate_val(&dev->mt76, rate, &nss);
	}
	spin_unlock_irqrestore(&dev->mt76.lock, flags);

	txwi->rate = cpu_to_le16(rate_ctl);

	if (info->flags & IEEE80211_TX_CTL_LDPC)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	/* STBC is only applied for single-stream transmissions. */
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;

	/* pkt_id 1 marks frames that expect an ACK, 0 marks no-ACK frames. */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
		pkt_id = 1;
	} else {
		pkt_id = 0;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		pkt_id |= MT_TXWI_PKTID_PROBE;

	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;

	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		/* Derive the BA window from the peer's A-MPDU factor,
		 * capped at 7 (the field's maximum).
		 */
		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 7, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			/* Probe frames go out without aggregation. */
			ba_size = 0;
		} else {
			txwi_flags |= MT_TXWI_FLAGS_AMPDU;
			txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
						 sta->ht_cap.ampdu_density);
		}
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
	}

	txwi->wcid = wcid->idx;
	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(pkt_len);
	txwi->pktid = pkt_id;

	return txwi;
}

/* mac80211 .tx entry point: frame the skb with a TXWI and hand it to the
 * DMA layer for transmission.
 */
void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76x0_dev *dev = hw->priv;
	struct ieee80211_vif *vif = info->control.vif;
	struct ieee80211_sta *sta = control->sta;
	struct mt76x02_sta *msta = NULL;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
	struct mt76x02_txwi *txwi;
	int pkt_len = skb->len;
	int hw_q = skb2q(skb);

	/* Stash the pre-overhead length in the skb's driver data so
	 * mt76x0_tx_skb_remove_dma_overhead() can restore it on completion.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;

	mt76x02_insert_hdr_pad(skb);

	/* Pick the wcid: the station's own entry when we have one, otherwise
	 * the vif's group wcid for non-station frames without a hw key
	 * (NOTE(review): the !hw_key && hw_key_idx != 0xff condition
	 * presumably routes encrypted-capable group traffic — confirm
	 * intent); everything else uses the global wcid.
	 */
	if (sta) {
		msta = (struct mt76x02_sta *) sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif && (!info->control.hw_key && wcid->hw_key_idx != 0xff)) {
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		wcid = &mvif->group_wcid;
	}

	txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);

	/* Nonzero means the enqueue failed; the skb is not traced then.
	 * NOTE(review): on success the skb/txwi are dereferenced by the
	 * trace after being handed to DMA — confirm the buffer cannot
	 * complete before the tracepoint fires.
	 */
	if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
		return;

	trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
}

/* Delayed work item that drains the hardware TX status FIFO and reports
 * each entry, then decides whether to reschedule itself.
 */
void mt76x0_tx_stat(struct work_struct *work)
{
	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
					       stat_work.work);
	struct mt76x02_tx_status stat;
	unsigned long flags;
	int cleaned = 0;
	u8 update = 1;

	/* Drain every pending status entry, bailing out early if the
	 * device is being removed.
	 */
	for (;;) {
		if (test_bit(MT76_REMOVED, &dev->mt76.state))
			break;
		if (!mt76x02_mac_load_tx_status(&dev->mt76, &stat))
			break;

		mt76x02_send_tx_status(&dev->mt76, &stat, &update);
		cleaned++;
	}
	trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (cleaned) {
		/* We made progress; poll again soon for more entries. */
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
	} else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state)) {
		/* Someone signalled more stats are coming; retry later. */
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(20));
	} else {
		/* Nothing left to do; mark the reader as idle. */
		clear_bit(MT76_READING_STATS, &dev->mt76.state);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}