/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>

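/*
 * Frame layout assumed by the receive and transmit paths below:
 * a 1-byte segmentation control field (RFM_SEGMENTATION_BIT is set
 * on every segment except the last) followed by a 6-byte segment
 * header whose last two bytes hold the PDU size, little endian.
 * RFM_HEAD_SIZE covers both parts.
 */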
#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
#define RFM_SEGMENTATION_BIT 0x01
#define RFM_HEAD_SIZE 7

static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);

struct cfrfml {
	struct cfsrvl serv;
	struct cfpkt *incomplete_frm;
	int fragment_size;
	u8 seghead[6];
	u16 pdu_size;
	/* Protects serialized processing of packets */
	spinlock_t sync;
};

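/*
 * kref release callback for the service layer: destroy any partially
 * reassembled frame before freeing the layer itself.
 */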
static void cfrfml_release(struct kref *kref)
{
	struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
	struct cfrfml *rfml = container_obj(&srvl->layer);

	if (rfml->incomplete_frm)
		cfpkt_destroy(rfml->incomplete_frm);

	kfree(srvl);
}

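/*
 * Allocate and initialize an RFM service layer for the given channel.
 * The fragment size is derived from the link MTU and rounded down to
 * a multiple of 16. Returns the generic layer pointer, or NULL on
 * allocation failure.
 */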
struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
				int mtu_size)
{
	int tmp;
	struct cfrfml *this =
		kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);

	if (!this) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return NULL;
	}

	cfsrvl_init(&this->serv, channel_id, dev_info, false);
	this->serv.release = cfrfml_release;
	this->serv.layer.receive = cfrfml_receive;
	this->serv.layer.transmit = cfrfml_transmit;

	/* Round down to closest multiple of 16 */
	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
	tmp *= 16;

	this->fragment_size = tmp;
	spin_lock_init(&this->sync);
	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
		"rfm%d", channel_id);

	return &this->serv.layer;
}

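/*
 * Append a continuation segment to the frame under reassembly. The
 * 6-byte segment header is stripped from the new segment and checked
 * against the header saved from the initial segment; on mismatch or
 * append failure NULL is returned and *err is set accordingly.
 */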
static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
			struct cfpkt *pkt, int *err)
{
	struct cfpkt *tmppkt;
	*err = -EPROTO;
	/* n-th but not last segment */

	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
		return NULL;

	/* Verify correct header */
	if (memcmp(seghead, rfml->seghead, 6) != 0)
		return NULL;

	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
			rfml->pdu_size + RFM_HEAD_SIZE);

	/* If cfpkt_append fails, the input packets are not freed */
	*err = -ENOMEM;
	if (tmppkt == NULL)
		return NULL;

	*err = 0;
	return tmppkt;
}

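/*
 * Receive one RFM frame from the layer below. Segmented frames are
 * collected in incomplete_frm until the final (unsegmented) frame
 * arrives; the reassembled PDU is then length-checked and handed to
 * the layer above. Any error destroys the reassembly state and
 * signals a remote shutdown indication upwards.
 */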
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 tmp;
	bool segmented;
	int err;
	u8 seghead[6];
	struct cfrfml *rfml;
	struct cfpkt *tmppkt = NULL;

	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	rfml = container_obj(layr);
	spin_lock(&rfml->sync);

	err = -EPROTO;
	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
		goto out;
	segmented = tmp & RFM_SEGMENTATION_BIT;

	if (segmented) {
		if (rfml->incomplete_frm == NULL) {
			/* Initial Segment */
			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
				goto out;

			rfml->pdu_size = get_unaligned_le16(rfml->seghead + 4);

			if (cfpkt_erroneous(pkt))
				goto out;
			rfml->incomplete_frm = pkt;
			pkt = NULL;
		} else {
			tmppkt = rfm_append(rfml, seghead, pkt, &err);
			if (tmppkt == NULL)
				goto out;

			if (cfpkt_erroneous(tmppkt))
				goto out;

			rfml->incomplete_frm = tmppkt;

			if (cfpkt_erroneous(tmppkt))
				goto out;
		}
		err = 0;
		goto out;
	}

	if (rfml->incomplete_frm) {
		/* Last Segment */
		tmppkt = rfm_append(rfml, seghead, pkt, &err);
		if (tmppkt == NULL)
			goto out;

		if (cfpkt_erroneous(tmppkt))
			goto out;

		rfml->incomplete_frm = NULL;
		pkt = tmppkt;
		tmppkt = NULL;

		/* Verify that length is correct */
		err = -EPROTO;
		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
			goto out;
	}

	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);

out:

	if (err != 0) {
		if (tmppkt)
			cfpkt_destroy(tmppkt);
		if (pkt)
			cfpkt_destroy(pkt);
		if (rfml->incomplete_frm)
			cfpkt_destroy(rfml->incomplete_frm);
		rfml->incomplete_frm = NULL;

		pr_info("CAIF: %s(): "
			"Connection error %d triggered on RFM link\n",
			__func__, err);

		/* Trigger connection error upon failure. */
		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
					rfml->serv.dev_info.id);
	}
	spin_unlock(&rfml->sync);
	return err;
}

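/*
 * Send one segment downwards: stamp the routing info (channel id,
 * header length and device info) on the packet and hand it to the
 * layer below.
 */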
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
{
	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);

	/* Add info for MUX-layer to route the packet out. */
	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;

	/*
	 * To optimize alignment, record the size of the CAIF header
	 * placed before the payload.
	 */
	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;

	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
}

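/*
 * Transmit one PDU from the layer above. PDUs larger than the
 * fragment size are split into segments: each non-final segment is
 * sent with the segmentation bit set, and continuation segments are
 * prefixed with a copy of the 6-byte segment header. Errors trigger
 * a remote shutdown indication upwards.
 */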
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	int err;
	u8 seg;
	u8 head[6];
	struct cfpkt *rearpkt = NULL;
	struct cfpkt *frontpkt = pkt;
	struct cfrfml *rfml = container_obj(layr);

	caif_assert(layr->dn != NULL);
	caif_assert(layr->dn->transmit != NULL);

	if (!cfsrvl_ready(&rfml->serv, &err))
		return err;

	err = -EPROTO;
	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE - 1)
		goto out;

	err = 0;
	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
		err = cfpkt_peek_head(pkt, head, 6);

	if (err < 0)
		goto out;

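	/*
	 * Emit full fragment_size segments while more than one
	 * segment's worth of payload remains; the final remainder is
	 * sent after the loop with the segmentation bit cleared.
	 */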
	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {

		seg = 1;
		err = -EPROTO;

		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
			goto out;
		/*
		 * On OOM error cfpkt_split returns NULL.
		 *
		 * NOTE: Segmented pdu is not correctly aligned.
		 * This has negative performance impact.
		 */

		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
		if (rearpkt == NULL)
			goto out;

		err = cfrfml_transmit_segment(rfml, frontpkt);

		if (err != 0)
			goto out;
		frontpkt = rearpkt;
		rearpkt = NULL;

		err = -ENOMEM;
		if (frontpkt == NULL)
			goto out;
		err = -EPROTO;
		if (cfpkt_add_head(frontpkt, head, 6) < 0)
			goto out;
	}

	seg = 0;
	err = -EPROTO;

	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
		goto out;

	err = cfrfml_transmit_segment(rfml, frontpkt);

	frontpkt = NULL;
out:

	if (err != 0) {
		pr_info("CAIF: %s(): "
			"Connection error %d triggered on RFM link\n",
			__func__, err);
		/* Trigger connection error upon failure. */

		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
					rfml->serv.dev_info.id);

		if (rearpkt)
			cfpkt_destroy(rearpkt);

		if (frontpkt && frontpkt != pkt) {
			cfpkt_destroy(frontpkt);
			/*
			 * The socket layer will free the original packet,
			 * but that packet may already have been sent and
			 * freed. Return 0 in this case so the socket layer
			 * does not free it again; the shutdown indication
			 * above invalidates the connection anyhow.
			 */
			err = 0;
		}
	}

	return err;
}