/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 * It will be created.
 * It will be marked with the intention to be
 *	submitted to local disk and/or
 *	sent via the network.
 *
 * It has to be placed on the transfer log and other housekeeping lists,
 * in case we have a network connection.
 *
 * It may be identified as a concurrent (write) request
 * and be handled accordingly.
 *
 * It may be handed over to the local disk subsystem.
 * It may be completed by the local disk subsystem,
 * either successfully or with io-error.
 * In case it is a READ request, and it failed locally,
 * it may be retried remotely.
 *
 * It may be queued for sending.
 * It may be handed over to the network stack,
 * which may fail.
 * It may be acknowledged by the "peer" according to the wire_protocol in use.
 * This may be a negative ack.
 * It may receive a faked ack when the network connection is lost and the
 * transfer log is cleaned up.
 * Sending may be canceled due to network connection loss.
 * When it finally has outlived its time,
 * corresponding dirty bits in the resync-bitmap may be cleared or set,
 * it will be destroyed,
 * and completion will be signalled to the originator,
 * with or without "success".
 */

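/* To make the above concrete, a rough sketch (illustrative only, not
 * normative) of the __req_mod() event sequence for a protocol C write
 * that succeeds on both legs:
 *
 *	created
 *	to_be_send, to_be_submitted
 *	queue_for_net_write
 *	handed_over_to_network
 *	completed_ok			(local disk leg)
 *	write_acked_by_peer		(network leg; master_bio completes
 *					 once both legs are done)
 *	barrier_acked			(request may be freed)
 *
 * The local and network legs proceed concurrently, so their relative
 * order is not fixed. */
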
enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,
	queue_for_send_oos,

	send_canceled,
	send_failed,
	handed_over_to_network,
	oos_handed_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	resend,
	fail_frozen_disk_io,
	restart_frozen_disk_io,
	nothing, /* for tracing only */
};

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transfer log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it from
	 * the transfer log. We should restructure the code so this conflict
	 * no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

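/* Usage sketch (illustrative helper, not referenced elsewhere in DRBD):
 * the TODO above asks how to tell a request with a network part from one
 * without; with the masks that is a plain bit test. */
static inline int drbd_req_has_net_part(struct drbd_request *req)
{
	/* any RQ_NET_* state bit set means this request has a network part */
	return (req->rq_state & RQ_NET_MASK) != 0;
}
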
#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)

/* For waking up the frozen transfer log, __req_mod() has to return whether
   the request should be counted in the epoch object */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)

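/* Usage sketch (hypothetical caller; the real consumer is _tl_restart()
 * in drbd_main.c): a caller restarting the frozen transfer log checks the
 * return value of _req_mod(), e.g.
 *
 *	rv = _req_mod(req, resend);
 *	if (rv & MR_WRITE)
 *		...count a write in the current epoch...
 *	if (rv & MR_READ)
 *		...count a read...
 */
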
/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static inline struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}

/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}
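
/* Usage sketch (illustrative; receive_DataReply() in drbd_receiver.c does
 * essentially this): the peer echoes the request pointer back as block_id,
 * and we verify it under the request lock before trusting it:
 *
 *	spin_lock_irq(&mdev->req_lock);
 *	req = _ar_id_to_req(mdev, p->block_id, sector);
 *	spin_unlock_irq(&mdev->req_lock);
 */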

static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private = req;
	bio->bi_end_io = drbd_endio_pri;
	bio->bi_next = NULL;
}

static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		drbd_req_make_private_bio(req, bio_src);

		req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev = mdev;
		req->master_bio = bio_src;
		req->epoch = 0;
		req->sector = bio_src->bi_sector;
		req->size = bio_src->bi_size;
		INIT_HLIST_NODE(&req->collision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);
	}
	return req;
}

static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
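
/* Usage sketch (simplified; the real caller is drbd_make_request_common()
 * in drbd_req.c): allocation failure is passed up to the block layer:
 *
 *	req = drbd_req_new(mdev, bio);
 *	if (!req) {
 *		dec_ap_bio(mdev);
 *		bio_endio(bio, -ENOMEM);
 *		return 0;
 *	}
 */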

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}

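/* Worked example: lengths are in bytes, sectors are 512 bytes (hence l>>9).
 * A 4096-byte request at sector 8 covers sectors [8, 16), so
 *	overlaps(8, 4096, 15, 512) == 1		(sector 15 lies inside [8, 16))
 *	overlaps(8, 4096, 16, 512) == 0		(sector 16 is just past the end)
 */
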
/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}

/* completion of master bio is outside of our spinlock.
 * We still may or may not be inside some irqs disabled section
 * of the lower level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	unsigned long flags;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	rv = __req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
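
/* Usage sketch (illustrative; drbd_endio_pri() in drbd_worker.c does the
 * equivalent): a bio completion callback maps the outcome to an event and
 * feeds it into the state machine:
 *
 *	what = error ? write_completed_with_error : completed_ok;
 *	req_mod(req, what);
 */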

static inline bool drbd_should_do_remote(union drbd_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}
static inline bool drbd_should_send_oos(union drbd_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}

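/* Usage sketch (simplified; the real decision in drbd_make_request_common()
 * in drbd_req.c also handles reads, the local disk leg, and congestion):
 *
 *	union drbd_state s = mdev->state;
 *	if (drbd_should_do_remote(s))
 *		_req_mod(req, queue_for_net_write);
 *	else if (drbd_should_send_oos(s))
 *		_req_mod(req, queue_for_send_oos);
 */
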
#endif