author    Sujith Manoharan <Sujith.Manoharan@atheros.com>    2011-04-13 11:26:18 +0530
committer John W. Linville <linville@tuxdriver.com>    2011-04-13 15:24:20 -0400
commit    27876a29de221186c9d5883e5fe5f6da18ef9a45 (patch)
tree      f47eb4d749e7bc49c8248171177c61d9bbe9952f /drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
parent    84c9e164468bd707e52b440e1c34bc3c85299332 (diff)
ath9k_htc: Add support for TX completion
Now that the infrastructure is in place, process WMI TX status events
and complete packets.

Signed-off-by: Sujith Manoharan <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
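For readers following the decoding below: each __wmi_event_txstatus packs both the rate index and the originating endpoint into its ts_rate word, and the handlers extract them with ath9k's MS() mask-and-shift helper. A minimal sketch, assuming the standard MS() definition from ath9k's hw.h:

    /* MS() as defined in ath9k's hw.h: mask out the field, then shift
     * down by the companion _S (shift) constant. */
    #define MS(_v, _f) (((_v) & _f) >> _f##_S)

    /* ts_rate carries two packed fields (names from the diff below): */
    u8 ridx = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_RATE); /* rate index  */
    u8 epid = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_EPID); /* endpoint id */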
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/htc_drv_txrx.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 208
1 file changed, 183 insertions(+), 25 deletions(-)
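For orientation before the diff: the patch retires the single priv->tx.tx_queue in favor of one queue per HTC endpoint plus a drain flag. Reconstructed from the fields the diff touches (a hedged sketch; the authoritative definition lives in ath9k's htc.h), the TX bookkeeping looks roughly like:

    /* Sketch only -- reconstructed from the queue/flag names used in
     * this patch; see struct ath9k_htc_tx in ath9k/htc.h for the
     * real layout. */
    struct ath9k_htc_tx {
            u8 flags;                      /* e.g. ATH9K_HTC_OP_TX_DRAIN */
            spinlock_t tx_lock;            /* guards flags */
            struct sk_buff_head mgmt_ep_queue;
            struct sk_buff_head cab_ep_queue;
            struct sk_buff_head data_be_queue;
            struct sk_buff_head data_bk_queue;
            struct sk_buff_head data_vi_queue;
            struct sk_buff_head data_vo_queue;
            struct sk_buff_head tx_failed; /* URB submission failures */
    };

ath9k_htc_txep() parks each completed URB's skb on the queue for its endpoint, and the WMI status event later reclaims it by cookie.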
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 0790070..a9b6bb1 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -99,8 +99,8 @@ void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot)
spin_unlock_bh(&priv->tx.tx_lock);
}
-static enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
- u16 qnum)
+static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
+ u16 qnum)
{
enum htc_endpoint_id epid;
@@ -127,6 +127,30 @@ static enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
return epid;
}
+static inline struct sk_buff_head*
+get_htc_epid_queue(struct ath9k_htc_priv *priv, u8 epid)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct sk_buff_head *epid_queue = NULL;
+
+ if (epid == priv->mgmt_ep)
+ epid_queue = &priv->tx.mgmt_ep_queue;
+ else if (epid == priv->cab_ep)
+ epid_queue = &priv->tx.cab_ep_queue;
+ else if (epid == priv->data_be_ep)
+ epid_queue = &priv->tx.data_be_queue;
+ else if (epid == priv->data_bk_ep)
+ epid_queue = &priv->tx.data_bk_queue;
+ else if (epid == priv->data_vi_ep)
+ epid_queue = &priv->tx.data_vi_queue;
+ else if (epid == priv->data_vo_ep)
+ epid_queue = &priv->tx.data_vo_queue;
+ else
+ ath_err(common, "Invalid EPID: %d\n", epid);
+
+ return epid_queue;
+}
+
/*
* Removes the driver header and returns the TX slot number
*/
@@ -387,11 +411,15 @@ static void ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
}
static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct __wmi_event_txstatus *txs)
{
struct ieee80211_vif *vif;
struct ath9k_htc_tx_ctl *tx_ctl;
struct ieee80211_tx_info *tx_info;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_conf *cur_conf = &priv->hw->conf;
+ struct ieee80211_supported_band *sband;
bool txok;
int slot;
@@ -405,6 +433,8 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
txok = tx_ctl->txok;
tx_info = IEEE80211_SKB_CB(skb);
vif = tx_info->control.vif;
+ rate = &tx_info->status.rates[0];
+ sband = priv->hw->wiphy->bands[cur_conf->channel->band];
memset(&tx_info->status, 0, sizeof(tx_info->status));
@@ -412,10 +442,32 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
* URB submission failed for this frame, it never reached
* the target.
*/
- if (!txok || !vif)
+ if (!txok || !vif || !txs)
goto send_mac80211;
- tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_FILT)
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_RTC_CTS)
+ rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
+
+ rate->count = 1;
+ rate->idx = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_RATE);
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_MCS) {
+ rate->flags |= IEEE80211_TX_RC_MCS;
+
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_CW40)
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+ } else {
+ if (cur_conf->channel->band == IEEE80211_BAND_5GHZ)
+ rate->idx += 4; /* No CCK rates */
+ }
ath9k_htc_check_tx_aggr(priv, vif, skb);
@@ -431,37 +483,130 @@ send_mac80211:
ieee80211_tx_status(priv->hw, skb);
}
+static inline void ath9k_htc_tx_drainq(struct ath9k_htc_priv *priv,
+ struct sk_buff_head *queue)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(queue)) != NULL) {
+ ath9k_htc_tx_process(priv, skb, NULL);
+ }
+}
+
void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
{
- struct sk_buff *skb = NULL;
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN;
+ spin_unlock_bh(&priv->tx.tx_lock);
/*
* Ensure that all pending TX frames are flushed,
- * and that the TX completion tasklet is killed.
+ * and that the TX completion/failed tasklets are killed.
*/
htc_stop(priv->htc);
- tasklet_kill(&priv->tx_tasklet);
+ tasklet_kill(&priv->wmi->wmi_event_tasklet);
+ tasklet_kill(&priv->tx_failed_tasklet);
- while ((skb = skb_dequeue(&priv->tx.tx_queue)) != NULL) {
- ath9k_htc_tx_process(priv, skb);
- }
+ ath9k_htc_tx_drainq(priv, &priv->tx.mgmt_ep_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.cab_ep_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_be_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_bk_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_vi_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue);
+ ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
- while ((skb = skb_dequeue(&priv->tx.tx_failed)) != NULL) {
- ath9k_htc_tx_process(priv, skb);
- }
+ spin_lock_bh(&priv->tx.tx_lock);
+ priv->tx.flags &= ~ATH9K_HTC_OP_TX_DRAIN;
+ spin_unlock_bh(&priv->tx.tx_lock);
}
-void ath9k_tx_tasklet(unsigned long data)
+void ath9k_tx_failed_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
- struct sk_buff *skb = NULL;
- while ((skb = skb_dequeue(&priv->tx.tx_queue)) != NULL) {
- ath9k_htc_tx_process(priv, skb);
+ spin_lock_bh(&priv->tx.tx_lock);
+ if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
+ spin_unlock_bh(&priv->tx.tx_lock);
+ return;
}
+ spin_unlock_bh(&priv->tx.tx_lock);
- while ((skb = skb_dequeue(&priv->tx.tx_failed)) != NULL) {
- ath9k_htc_tx_process(priv, skb);
+ ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed);
+}
+
+static inline bool check_cookie(struct ath9k_htc_priv *priv,
+ struct sk_buff *skb,
+ u8 cookie, u8 epid)
+{
+ u8 fcookie = 0;
+
+ if (epid == priv->mgmt_ep) {
+ struct tx_mgmt_hdr *hdr;
+ hdr = (struct tx_mgmt_hdr *) skb->data;
+ fcookie = hdr->cookie;
+ } else if ((epid == priv->data_bk_ep) ||
+ (epid == priv->data_be_ep) ||
+ (epid == priv->data_vi_ep) ||
+ (epid == priv->data_vo_ep) ||
+ (epid == priv->cab_ep)) {
+ struct tx_frame_hdr *hdr;
+ hdr = (struct tx_frame_hdr *) skb->data;
+ fcookie = hdr->cookie;
+ }
+
+ if (fcookie == cookie)
+ return true;
+
+ return false;
+}
+
+static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
+ struct __wmi_event_txstatus *txs)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct sk_buff_head *epid_queue;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ u8 epid = MS(txs->ts_rate, ATH9K_HTC_TXSTAT_EPID);
+
+ epid_queue = get_htc_epid_queue(priv, epid);
+ if (!epid_queue)
+ return NULL;
+
+ spin_lock_irqsave(&epid_queue->lock, flags);
+ skb_queue_walk_safe(epid_queue, skb, tmp) {
+ if (check_cookie(priv, skb, txs->cookie, epid)) {
+ __skb_unlink(skb, epid_queue);
+ spin_unlock_irqrestore(&epid_queue->lock, flags);
+ return skb;
+ }
+ }
+ spin_unlock_irqrestore(&epid_queue->lock, flags);
+
+ ath_dbg(common, ATH_DBG_XMIT,
+ "No matching packet for cookie: %d, epid: %d\n",
+ txs->cookie, epid);
+
+ return NULL;
+}
+
+void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
+{
+ struct wmi_event_txstatus *txs = (struct wmi_event_txstatus *)wmi_event;
+ struct __wmi_event_txstatus *__txs;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < txs->cnt; i++) {
+ WARN_ON(txs->cnt > HTC_MAX_TX_STATUS);
+
+ __txs = &txs->txstatus[i];
+
+ skb = ath9k_htc_tx_get_packet(priv, __txs);
+ if (!skb)
+ continue;
+
+ ath9k_htc_tx_process(priv, skb, __txs);
}
/* Wake TX queues if needed */
@@ -473,21 +618,34 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
struct ath9k_htc_tx_ctl *tx_ctl;
+ struct sk_buff_head *epid_queue;
tx_ctl = HTC_SKB_CB(skb);
tx_ctl->txok = txok;
- if (txok)
- skb_queue_tail(&priv->tx.tx_queue, skb);
- else
+ if (!txok) {
skb_queue_tail(&priv->tx.tx_failed, skb);
+ tasklet_schedule(&priv->tx_failed_tasklet);
+ return;
+ }
+
+ epid_queue = get_htc_epid_queue(priv, ep_id);
+ if (!epid_queue) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
- tasklet_schedule(&priv->tx_tasklet);
+ skb_queue_tail(epid_queue, skb);
}
int ath9k_tx_init(struct ath9k_htc_priv *priv)
{
- skb_queue_head_init(&priv->tx.tx_queue);
+ skb_queue_head_init(&priv->tx.mgmt_ep_queue);
+ skb_queue_head_init(&priv->tx.cab_ep_queue);
+ skb_queue_head_init(&priv->tx.data_be_queue);
+ skb_queue_head_init(&priv->tx.data_bk_queue);
+ skb_queue_head_init(&priv->tx.data_vi_queue);
+ skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
return 0;
}
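The heart of the new completion path is matching a WMI status report to a parked skb by (endpoint, cookie), as ath9k_htc_tx_get_packet() does above with skb_queue_walk_safe() under the queue lock. A self-contained toy illustration of that pattern in plain C (not driver code; all names here are invented for the example):

    /* Toy sketch of cookie-matched completion: walk a queue, unlink
     * the entry whose cookie matches the status report. */
    #include <stdio.h>
    #include <stdint.h>

    struct frame {
            uint8_t cookie;
            const char *payload;
            struct frame *next;
    };

    static struct frame *dequeue_by_cookie(struct frame **head, uint8_t cookie)
    {
            struct frame **pp;

            for (pp = head; *pp; pp = &(*pp)->next) {
                    if ((*pp)->cookie == cookie) {
                            struct frame *f = *pp;
                            *pp = f->next;  /* unlink, like __skb_unlink() */
                            return f;
                    }
            }
            return NULL;                    /* stale or unknown cookie */
    }

    int main(void)
    {
            struct frame f2 = { 2, "frame-2", NULL };
            struct frame f1 = { 1, "frame-1", &f2 };
            struct frame *q = &f1;

            struct frame *done = dequeue_by_cookie(&q, 2);
            printf("completed: %s\n", done ? done->payload : "(none)");
            return 0;
    }

The driver version additionally logs an ath_dbg() note when no cookie matches, since a status event can outlive its frame (for example, after a drain).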