/*
 * Copyright 2007-2012 Siemens AG
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>

#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/wpan-phy.h>

#include "mac802154.h"

/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
 * packets through the workqueue.
 */
struct xmit_work {
	struct sk_buff *skb;
	struct work_struct work;
	struct mac802154_priv *priv;
	u8 chan;
	u8 page;
};

static void mac802154_xmit_worker(struct work_struct *work)
{
	struct xmit_work *xw = container_of(work, struct xmit_work, work);
	struct mac802154_sub_if_data *sdata;
	int res;

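	/* Retune the transceiver only if the requested page/channel differs
	 * from the PHY's current setting; pib_lock serializes updates to
	 * these attributes.
	 */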
	mutex_lock(&xw->priv->phy->pib_lock);
	if (xw->priv->phy->current_channel != xw->chan ||
	    xw->priv->phy->current_page != xw->page) {
		res = xw->priv->ops->set_channel(&xw->priv->hw,
						 xw->page,
						 xw->chan);
		if (res) {
			pr_debug("set_channel failed\n");
			goto out;
		}

		xw->priv->phy->current_channel = xw->chan;
		xw->priv->phy->current_page = xw->page;
	}

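	/* Hand the frame to the driver for the actual transmission. */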
	res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);
	if (res)
		pr_debug("transmission failed\n");

out:
	mutex_unlock(&xw->priv->phy->pib_lock);

	/* Restart the netif queue on each sub_if_data object. */
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &xw->priv->slaves, list)
		netif_wake_queue(sdata->dev);
	rcu_read_unlock();

	dev_kfree_skb(xw->skb);

	kfree(xw);
}

netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
			 u8 page, u8 chan)
{
	struct xmit_work *work;
	struct mac802154_sub_if_data *sdata;

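	/* A frame for a page/channel combination the PHY does not support
	 * indicates a bug in the caller; warn and drop it.
	 */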
	if (!(priv->phy->channels_supported[page] & (1 << chan))) {
		WARN_ON(1);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

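	/* Let any monitor interfaces see the outgoing frame. */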
	mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);

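	/* Append the 16-bit CRC-CCITT frame check sequence (least
	 * significant byte first) unless the hardware adds the FCS itself.
	 */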
	if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
		u16 crc = crc_ccitt(0, skb->data, skb->len);
		u8 *data = skb_put(skb, 2);

		data[0] = crc & 0xff;
		data[1] = crc >> 8;
	}

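	/* Make sure there is enough writable headroom for the driver
	 * (extra_tx_headroom); drop the frame if reallocation fails.
	 */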
	if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

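	/* This runs from the hot TX path and must not sleep, so allocate
	 * the work item with GFP_ATOMIC.
	 */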
	work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
	if (!work) {
		kfree_skb(skb);
		return NETDEV_TX_BUSY;
	}

	/* Stop the netif queue on each sub_if_data object. */
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &priv->slaves, list)
		netif_stop_queue(sdata->dev);
	rcu_read_unlock();

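	/* With the queues stopped, defer the transmission to the workqueue,
	 * where the driver's xmit callback is allowed to sleep.
	 */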
	INIT_WORK(&work->work, mac802154_xmit_worker);
	work->skb = skb;
	work->priv = priv;
	work->page = page;
	work->chan = chan;

	queue_work(priv->dev_workqueue, &work->work);

	return NETDEV_TX_OK;
}