/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>

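/*
 * An RFM frame starts with a one-byte segmentation control field
 * (RFM_SEGMENTATION_BIT is set on all but the last segment), followed by a
 * six-byte segment header whose last two bytes carry the total PDU size
 * (little endian). RFM_HEAD_SIZE covers both parts.
 */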
#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
#define RFM_SEGMENTATION_BIT 0x01
#define RFM_HEAD_SIZE 7

static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);

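/*
 * Per-channel state for the RFM service layer. incomplete_frm, seghead and
 * pdu_size track a partially reassembled segmented frame on the receive
 * path; fragment_size bounds the size of each segment on the transmit path;
 * sync serializes packet processing.
 */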
struct cfrfml {
        struct cfsrvl serv;
        struct cfpkt *incomplete_frm;
        int fragment_size;
        u8 seghead[6];
        u16 pdu_size;
        /* Protects serialized processing of packets */
        spinlock_t sync;
};

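/* Release callback: free any half-reassembled frame before the layer itself. */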
static void cfrfml_release(struct kref *kref)
{
        struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
        struct cfrfml *rfml = container_obj(&srvl->layer);

        if (rfml->incomplete_frm)
                cfpkt_destroy(rfml->incomplete_frm);

        kfree(srvl);
}

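/*
 * Create the RFM service layer for a channel. The outgoing fragment size is
 * derived from the link MTU: RFM_HEAD_SIZE plus six bytes are reserved and
 * the remainder is rounded down to a multiple of 16.
 */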
struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
                              int mtu_size)
{
        int tmp;
        struct cfrfml *this =
                kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);

        if (!this) {
                pr_warn("Out of memory\n");
                return NULL;
        }

        cfsrvl_init(&this->serv, channel_id, dev_info, false);
        this->serv.release = cfrfml_release;
        this->serv.layer.receive = cfrfml_receive;
        this->serv.layer.transmit = cfrfml_transmit;

        /* Round down to closest multiple of 16 */
        tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
        tmp *= 16;

        this->fragment_size = tmp;
        spin_lock_init(&this->sync);
        snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
                 "rfm%d", channel_id);

        return &this->serv.layer;
}

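/*
 * Append a follow-on segment to the frame being reassembled. The six-byte
 * segment header is stripped from the incoming packet and must match the
 * header saved from the initial segment. On failure the input packets are
 * left for the caller to free and *err is set accordingly.
 */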
static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
                                struct cfpkt *pkt, int *err)
{
        struct cfpkt *tmppkt;

        *err = -EPROTO;
        /* n-th but not last segment */
        if (cfpkt_extr_head(pkt, seghead, 6) < 0)
                return NULL;

        /* Verify correct header */
        if (memcmp(seghead, rfml->seghead, 6) != 0)
                return NULL;

        tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
                              rfml->pdu_size + RFM_HEAD_SIZE);

        /* If cfpkt_append fails, the input packets are not freed */
        *err = -ENOMEM;
        if (tmppkt == NULL)
                return NULL;

        *err = 0;
        return tmppkt;
}

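/*
 * Receive an RFM frame from the layer below. Segmented frames are
 * accumulated in incomplete_frm until the final segment (segmentation bit
 * cleared) arrives; the reassembled frame is then length-checked and passed
 * to the layer above. Any error drops the partial frame and signals a remote
 * shutdown indication upwards.
 */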
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
        u8 tmp;
        bool segmented;
        int err;
        u8 seghead[6];
        struct cfrfml *rfml;
        struct cfpkt *tmppkt = NULL;

        caif_assert(layr->up != NULL);
        caif_assert(layr->receive != NULL);
        rfml = container_obj(layr);
        spin_lock(&rfml->sync);

        err = -EPROTO;
        if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
                goto out;
        segmented = tmp & RFM_SEGMENTATION_BIT;

        if (segmented) {
                if (rfml->incomplete_frm == NULL) {
                        /* Initial Segment */
                        if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
                                goto out;

                        rfml->pdu_size = get_unaligned_le16(rfml->seghead + 4);

                        if (cfpkt_erroneous(pkt))
                                goto out;
                        rfml->incomplete_frm = pkt;
                        pkt = NULL;
                } else {
                        tmppkt = rfm_append(rfml, seghead, pkt, &err);
                        if (tmppkt == NULL)
                                goto out;

                        if (cfpkt_erroneous(tmppkt))
                                goto out;

                        rfml->incomplete_frm = tmppkt;
                }
                err = 0;
                goto out;
        }

        if (rfml->incomplete_frm) {

                /* Last Segment */
                tmppkt = rfm_append(rfml, seghead, pkt, &err);
                if (tmppkt == NULL)
                        goto out;

                if (cfpkt_erroneous(tmppkt))
                        goto out;

                rfml->incomplete_frm = NULL;
                pkt = tmppkt;
                tmppkt = NULL;

                /* Verify that length is correct */
                err = -EPROTO;
                if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
                        goto out;
        }

        err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);

out:
        if (err != 0) {
                if (tmppkt)
                        cfpkt_destroy(tmppkt);
                if (pkt)
                        cfpkt_destroy(pkt);
                if (rfml->incomplete_frm)
                        cfpkt_destroy(rfml->incomplete_frm);
                rfml->incomplete_frm = NULL;

                pr_info("Connection error %d triggered on RFM link\n", err);

                /* Trigger connection error upon failure. */
                layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
                                  rfml->serv.dev_info.id);
        }
        spin_unlock(&rfml->sync);
        return err;
}

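/*
 * Send one segment (or an unsegmented frame) to the layer below, filling in
 * the packet info the MUX layer needs to route it out.
 */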
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
{
        caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);

        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;

        /*
         * To optimize alignment, we add up the size of CAIF header before
         * payload.
         */
        cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
        cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;

        return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
}

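/*
 * Transmit a frame from the layer above. Frames longer than fragment_size
 * plus the RFM header are split into segments: each segment repeats the
 * six-byte header and carries the segmentation bit, and the final part is
 * sent with the bit cleared. Errors trigger a remote shutdown indication.
 */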
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
        int err;
        u8 seg;
        u8 head[6];
        struct cfpkt *rearpkt = NULL;
        struct cfpkt *frontpkt = pkt;
        struct cfrfml *rfml = container_obj(layr);

        caif_assert(layr->dn != NULL);
        caif_assert(layr->dn->transmit != NULL);

        if (!cfsrvl_ready(&rfml->serv, &err))
                return err;

        err = -EPROTO;
        if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE - 1)
                goto out;

        err = 0;
        if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
                err = cfpkt_peek_head(pkt, head, 6);

        if (err < 0)
                goto out;

        while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {

                seg = 1;
                err = -EPROTO;

                if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
                        goto out;
                /*
                 * On OOM error cfpkt_split returns NULL.
                 *
                 * NOTE: Segmented pdu is not correctly aligned.
                 * This has negative performance impact.
                 */

                rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
                if (rearpkt == NULL)
                        goto out;

                err = cfrfml_transmit_segment(rfml, frontpkt);

                if (err != 0)
                        goto out;
                frontpkt = rearpkt;
                rearpkt = NULL;

                err = -ENOMEM;
                if (frontpkt == NULL)
                        goto out;
                err = -EPROTO;
                if (cfpkt_add_head(frontpkt, head, 6) < 0)
                        goto out;
        }

        seg = 0;
        err = -EPROTO;

        if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
                goto out;

        err = cfrfml_transmit_segment(rfml, frontpkt);

        frontpkt = NULL;
out:
        if (err != 0) {
                pr_info("Connection error %d triggered on RFM link\n", err);
                /* Trigger connection error upon failure. */
                layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
                                  rfml->serv.dev_info.id);

                if (rearpkt)
                        cfpkt_destroy(rearpkt);

                if (frontpkt && frontpkt != pkt) {
                        cfpkt_destroy(frontpkt);
                        /*
                         * The socket layer will free the original packet,
                         * but that packet may already have been sent and
                         * freed, so return 0 here to keep the socket layer
                         * from freeing it again. The shutdown indication
                         * above invalidates the connection anyway.
                         */
                        err = 0;
                }
        }

        return err;
}