blob: 130b65e806b8987268613aba45626af0a6bba3d3 [file] [log] [blame]
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001/* drivers/tty/smux_test.c
2 *
Eric Holmberg837a9342012-10-29 14:12:34 -06003 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06004 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/debugfs.h>
16#include <linux/list.h>
17#include <linux/ctype.h>
18#include <linux/jiffies.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/completion.h>
22#include <linux/termios.h>
Eric Holmberg6966d5d2012-10-11 11:13:46 -060023#include <linux/sched.h>
Eric Holmberg8ed30f22012-05-10 19:16:51 -060024#include <linux/smux.h>
Eric Holmberg06011322012-07-06 18:17:03 -060025#include <mach/subsystem_restart.h>
Eric Holmberg8ed30f22012-05-10 19:16:51 -060026#include "smux_private.h"
27
28#define DEBUG_BUFMAX 4096
Eric Holmberg0b2c02a2013-04-09 11:56:53 -060029#define RED_ZONE_SIZE 16
30#define RED_ZONE_PRE_CH 0xAB
31#define RED_ZONE_POS_CH 0xBA
32#define SMUX_REMOTE_INACTIVITY_TIME_MS 50
33#define SMUX_REMOTE_DELAY_TIME_MS 250
Eric Holmberg8ed30f22012-05-10 19:16:51 -060034
35/**
36 * Unit test assertion for logging test cases.
37 *
38 * @a lval
39 * @b rval
40 * @cmp comparison operator
41 *
42 * Assertion fails if (@a cmp @b) is not true which then
43 * logs the function and line number where the error occurred
44 * along with the values of @a and @b.
45 *
46 * Assumes that the following local variables exist:
47 * @buf - buffer to write failure message to
48 * @i - number of bytes written to buffer
49 * @max - maximum size of the buffer
50 * @failed - set to true if test fails
51 */
52#define UT_ASSERT_INT(a, cmp, b) \
Eric Holmberg527b66f2012-07-02 16:01:16 -060053 { \
54 int a_tmp = (a); \
55 int b_tmp = (b); \
56 if (!((a_tmp)cmp(b_tmp))) { \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060057 i += scnprintf(buf + i, max - i, \
58 "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
59 __func__, __LINE__, \
Eric Holmberg527b66f2012-07-02 16:01:16 -060060 a_tmp, b_tmp); \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060061 failed = 1; \
62 break; \
63 } \
Eric Holmberg527b66f2012-07-02 16:01:16 -060064 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -060065
66#define UT_ASSERT_PTR(a, cmp, b) \
Eric Holmberg527b66f2012-07-02 16:01:16 -060067 { \
68 void *a_tmp = (a); \
69 void *b_tmp = (b); \
70 if (!((a_tmp)cmp(b_tmp))) { \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060071 i += scnprintf(buf + i, max - i, \
72 "%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
73 __func__, __LINE__, \
Eric Holmberg527b66f2012-07-02 16:01:16 -060074 a_tmp, b_tmp); \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060075 failed = 1; \
76 break; \
77 } \
Eric Holmberg527b66f2012-07-02 16:01:16 -060078 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -060079
80#define UT_ASSERT_UINT(a, cmp, b) \
Eric Holmberg527b66f2012-07-02 16:01:16 -060081 { \
82 unsigned a_tmp = (a); \
83 unsigned b_tmp = (b); \
84 if (!((a_tmp)cmp(b_tmp))) { \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060085 i += scnprintf(buf + i, max - i, \
86 "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
87 __func__, __LINE__, \
Eric Holmberg527b66f2012-07-02 16:01:16 -060088 a_tmp, b_tmp); \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060089 failed = 1; \
90 break; \
91 } \
Eric Holmberg527b66f2012-07-02 16:01:16 -060092 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -060093
Eric Holmbergb8435c82012-06-05 14:51:29 -060094/**
95 * In-range unit test assertion for test cases.
96 *
97 * @a lval
98 * @minv Minimum value
99 * @maxv Maximum value
100 *
101 * Assertion fails if @a is not on the exclusive range minv, maxv
102 * ((@a < @minv) or (@a > @maxv)). In the failure case, the macro
103 * logs the function and line number where the error occurred along
104 * with the values of @a and @minv, @maxv.
105 *
106 * Assumes that the following local variables exist:
107 * @buf - buffer to write failure message to
108 * @i - number of bytes written to buffer
109 * @max - maximum size of the buffer
110 * @failed - set to true if test fails
111 */
112#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
Eric Holmberg527b66f2012-07-02 16:01:16 -0600113 { \
114 int a_tmp = (a); \
115 int minv_tmp = (minv); \
116 int maxv_tmp = (maxv); \
117 if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
Eric Holmbergb8435c82012-06-05 14:51:29 -0600118 i += scnprintf(buf + i, max - i, \
119 "%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
120 #a "(%d) > " #maxv "(%d)\n", \
121 __func__, __LINE__, \
Eric Holmberg527b66f2012-07-02 16:01:16 -0600122 a_tmp, minv_tmp, a_tmp, maxv_tmp); \
Eric Holmbergb8435c82012-06-05 14:51:29 -0600123 failed = 1; \
124 break; \
125 } \
Eric Holmberg527b66f2012-07-02 16:01:16 -0600126 }
Eric Holmbergb8435c82012-06-05 14:51:29 -0600127
128
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600129static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
130 89, 144, 233};
131
Eric Holmbergb8435c82012-06-05 14:51:29 -0600132/* when 1, forces failure of get_rx_buffer_mock function */
133static int get_rx_buffer_mock_fail;
134
135
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600136/* Used for mapping local to remote TIOCM signals */
137struct tiocm_test_vector {
138 uint32_t input;
139 uint32_t set_old;
140 uint32_t set_new;
141 uint32_t clr_old;
142};
143
144/**
145 * Allocates a new buffer for SMUX for every call.
146 */
Eric Holmberg837a9342012-10-29 14:12:34 -0600147static int get_rx_buffer(void *priv, void **pkt_priv, void **buffer, int size)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600148{
149 void *rx_buf;
150
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600151 rx_buf = kmalloc(size, GFP_KERNEL);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600152 *pkt_priv = (void *)0x1234;
153 *buffer = rx_buf;
154
155 return 0;
156}
157
158/* Test vector for packet tests. */
159struct test_vector {
160 const char *data;
161 const unsigned len;
162};
163
164/* Mock object metadata for SMUX_READ_DONE event */
165struct mock_read_event {
166 struct list_head list;
167 struct smux_meta_read meta;
168};
169
170/* Mock object metadata for SMUX_WRITE_DONE event */
171struct mock_write_event {
172 struct list_head list;
173 struct smux_meta_write meta;
174};
175
Eric Holmbergb8435c82012-06-05 14:51:29 -0600176/* Mock object metadata for get_rx_buffer failure event */
177struct mock_get_rx_buff_event {
178 struct list_head list;
179 int size;
180 unsigned long jiffies;
181};
182
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600183/* Mock object for all SMUX callback events */
184struct smux_mock_callback {
185 int cb_count;
186 struct completion cb_completion;
187 spinlock_t lock;
188
189 /* status changes */
190 int event_connected;
191 int event_disconnected;
192 int event_disconnected_ssr;
193 int event_low_wm;
194 int event_high_wm;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600195 int event_rx_retry_high_wm;
196 int event_rx_retry_low_wm;
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530197 int event_local_closed;
198 int event_remote_closed;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600199
200 /* TIOCM changes */
201 int event_tiocm;
202 struct smux_meta_tiocm tiocm_meta;
203
204 /* read event data */
205 int event_read_done;
206 int event_read_failed;
207 struct list_head read_events;
208
Eric Holmbergb8435c82012-06-05 14:51:29 -0600209 /* read retry data */
210 int get_rx_buff_retry_count;
211 struct list_head get_rx_buff_retry_events;
212
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600213 /* write event data */
214 int event_write_done;
215 int event_write_failed;
216 struct list_head write_events;
217};
218
Eric Holmberg06011322012-07-06 18:17:03 -0600219static int get_rx_buffer_mock(void *priv, void **pkt_priv,
220 void **buffer, int size);
221
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600222/**
223 * Initialize mock callback data. Only call once.
224 *
225 * @cb Mock callback data
226 */
Eric Holmberg837a9342012-10-29 14:12:34 -0600227static void mock_cb_data_init(struct smux_mock_callback *cb)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600228{
229 init_completion(&cb->cb_completion);
230 spin_lock_init(&cb->lock);
231 INIT_LIST_HEAD(&cb->read_events);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600232 INIT_LIST_HEAD(&cb->get_rx_buff_retry_events);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600233 INIT_LIST_HEAD(&cb->write_events);
234}
235
236/**
237 * Reset mock callback data to default values.
238 *
239 * @cb Mock callback data
240 *
241 * All packets are freed and counters reset to zero.
242 */
Eric Holmberg837a9342012-10-29 14:12:34 -0600243static void mock_cb_data_reset(struct smux_mock_callback *cb)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600244{
245 cb->cb_count = 0;
246 INIT_COMPLETION(cb->cb_completion);
247 cb->event_connected = 0;
248 cb->event_disconnected = 0;
249 cb->event_disconnected_ssr = 0;
250 cb->event_low_wm = 0;
251 cb->event_high_wm = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600252 cb->event_rx_retry_high_wm = 0;
253 cb->event_rx_retry_low_wm = 0;
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530254 cb->event_local_closed = 0;
255 cb->event_remote_closed = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600256 cb->event_tiocm = 0;
257 cb->tiocm_meta.tiocm_old = 0;
258 cb->tiocm_meta.tiocm_new = 0;
259
260 cb->event_read_done = 0;
261 cb->event_read_failed = 0;
262 while (!list_empty(&cb->read_events)) {
263 struct mock_read_event *meta;
264 meta = list_first_entry(&cb->read_events,
265 struct mock_read_event,
266 list);
267 kfree(meta->meta.buffer);
268 list_del(&meta->list);
269 kfree(meta);
270 }
271
Eric Holmbergb8435c82012-06-05 14:51:29 -0600272 cb->get_rx_buff_retry_count = 0;
273 while (!list_empty(&cb->get_rx_buff_retry_events)) {
274 struct mock_get_rx_buff_event *meta;
275 meta = list_first_entry(&cb->get_rx_buff_retry_events,
276 struct mock_get_rx_buff_event,
277 list);
278 list_del(&meta->list);
279 kfree(meta);
280 }
281
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600282 cb->event_write_done = 0;
283 cb->event_write_failed = 0;
284 while (!list_empty(&cb->write_events)) {
285 struct mock_write_event *meta;
286 meta = list_first_entry(&cb->write_events,
287 struct mock_write_event,
288 list);
289 list_del(&meta->list);
290 kfree(meta);
291 }
292}
293
294/**
295 * Dump the values of the mock callback data for debug purposes.
296 *
297 * @cb Mock callback data
298 * @buf Print buffer
299 * @max Maximum number of characters to print
300 *
301 * @returns Number of characters added to buffer
302 */
303static int mock_cb_data_print(const struct smux_mock_callback *cb,
304 char *buf, int max)
305{
306 int i = 0;
307
308 i += scnprintf(buf + i, max - i,
309 "\tcb_count=%d\n"
310 "\tcb_completion.done=%d\n"
311 "\tevent_connected=%d\n"
312 "\tevent_disconnected=%d\n"
313 "\tevent_disconnected_ssr=%d\n"
314 "\tevent_low_wm=%d\n"
315 "\tevent_high_wm=%d\n"
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600316 "\tevent_rx_retry_high_wm=%d\n"
317 "\tevent_rx_retry_low_wm=%d\n"
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530318 "\tevent_local_closed=%d\n"
319 "\tevent_remote_closed=%d\n"
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600320 "\tevent_tiocm=%d\n"
321 "\tevent_read_done=%d\n"
322 "\tevent_read_failed=%d\n"
Eric Holmberg88373642012-07-02 14:16:08 -0600323 "\tread_events empty=%d\n"
Eric Holmbergb8435c82012-06-05 14:51:29 -0600324 "\tget_rx_retry=%d\n"
Eric Holmberg88373642012-07-02 14:16:08 -0600325 "\tget_rx_retry_events empty=%d\n"
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600326 "\tevent_write_done=%d\n"
327 "\tevent_write_failed=%d\n"
Eric Holmberg88373642012-07-02 14:16:08 -0600328 "\twrite_events empty=%d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600329 cb->cb_count,
330 cb->cb_completion.done,
331 cb->event_connected,
332 cb->event_disconnected,
333 cb->event_disconnected_ssr,
334 cb->event_low_wm,
335 cb->event_high_wm,
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600336 cb->event_rx_retry_high_wm,
337 cb->event_rx_retry_low_wm,
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530338 cb->event_local_closed,
339 cb->event_remote_closed,
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600340 cb->event_tiocm,
341 cb->event_read_done,
342 cb->event_read_failed,
Eric Holmberg88373642012-07-02 14:16:08 -0600343 list_empty(&cb->read_events),
Eric Holmbergb8435c82012-06-05 14:51:29 -0600344 cb->get_rx_buff_retry_count,
Eric Holmberg88373642012-07-02 14:16:08 -0600345 list_empty(&cb->get_rx_buff_retry_events),
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600346 cb->event_write_done,
347 cb->event_write_failed,
348 list_empty(&cb->write_events)
349 );
350
351 return i;
352}
353
354/**
355 * Mock object event callback. Used to logs events for analysis in the unit
356 * tests.
357 */
Eric Holmberg837a9342012-10-29 14:12:34 -0600358static void smux_mock_cb(void *priv, int event, const void *metadata)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600359{
360 struct smux_mock_callback *cb_data_ptr;
361 struct mock_write_event *write_event_meta;
362 struct mock_read_event *read_event_meta;
363 unsigned long flags;
364
365 cb_data_ptr = (struct smux_mock_callback *)priv;
366 if (cb_data_ptr == NULL) {
367 pr_err("%s: invalid private data\n", __func__);
368 return;
369 }
370
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600371 switch (event) {
372 case SMUX_CONNECTED:
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600373 spin_lock_irqsave(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600374 ++cb_data_ptr->event_connected;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600375 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600376 break;
377
378 case SMUX_DISCONNECTED:
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600379 spin_lock_irqsave(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600380 ++cb_data_ptr->event_disconnected;
381 cb_data_ptr->event_disconnected_ssr =
382 ((struct smux_meta_disconnected *)metadata)->is_ssr;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600383 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600384 break;
385
386 case SMUX_READ_DONE:
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600387 read_event_meta = kmalloc(sizeof(struct mock_read_event),
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600388 GFP_KERNEL);
389 spin_lock_irqsave(&cb_data_ptr->lock, flags);
390 ++cb_data_ptr->event_read_done;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600391 if (read_event_meta) {
392 read_event_meta->meta =
393 *(struct smux_meta_read *)metadata;
394 list_add_tail(&read_event_meta->list,
395 &cb_data_ptr->read_events);
396 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600397 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600398 break;
399
400 case SMUX_READ_FAIL:
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600401 read_event_meta = kmalloc(sizeof(struct mock_read_event),
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600402 GFP_KERNEL);
403 spin_lock_irqsave(&cb_data_ptr->lock, flags);
404 ++cb_data_ptr->event_read_failed;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600405 if (read_event_meta) {
Eric Holmbergb8435c82012-06-05 14:51:29 -0600406 if (metadata)
407 read_event_meta->meta =
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600408 *(struct smux_meta_read *)metadata;
Eric Holmbergb8435c82012-06-05 14:51:29 -0600409 else
410 memset(&read_event_meta->meta, 0x0,
411 sizeof(struct smux_meta_read));
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600412 list_add_tail(&read_event_meta->list,
413 &cb_data_ptr->read_events);
414 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600415 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600416 break;
417
418 case SMUX_WRITE_DONE:
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600419 write_event_meta = kmalloc(sizeof(struct mock_write_event),
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600420 GFP_KERNEL);
421 spin_lock_irqsave(&cb_data_ptr->lock, flags);
422 ++cb_data_ptr->event_write_done;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600423 if (write_event_meta) {
424 write_event_meta->meta =
425 *(struct smux_meta_write *)metadata;
426 list_add_tail(&write_event_meta->list,
427 &cb_data_ptr->write_events);
428 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600429 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600430 break;
431
432 case SMUX_WRITE_FAIL:
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600433 write_event_meta = kmalloc(sizeof(struct mock_write_event),
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600434 GFP_KERNEL);
435 spin_lock_irqsave(&cb_data_ptr->lock, flags);
436 ++cb_data_ptr->event_write_failed;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600437 if (write_event_meta) {
438 write_event_meta->meta =
439 *(struct smux_meta_write *)metadata;
440 list_add_tail(&write_event_meta->list,
441 &cb_data_ptr->write_events);
442 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600443 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600444 break;
445
446 case SMUX_LOW_WM_HIT:
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600447 spin_lock_irqsave(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600448 ++cb_data_ptr->event_low_wm;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600449 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600450 break;
451
452 case SMUX_HIGH_WM_HIT:
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600453 spin_lock_irqsave(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600454 ++cb_data_ptr->event_high_wm;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600455 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600456 break;
457
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600458 case SMUX_RX_RETRY_HIGH_WM_HIT:
459 spin_lock_irqsave(&cb_data_ptr->lock, flags);
460 ++cb_data_ptr->event_rx_retry_high_wm;
461 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
462 break;
463
464 case SMUX_RX_RETRY_LOW_WM_HIT:
465 spin_lock_irqsave(&cb_data_ptr->lock, flags);
466 ++cb_data_ptr->event_rx_retry_low_wm;
467 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
468 break;
469
470
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600471 case SMUX_TIOCM_UPDATE:
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600472 spin_lock_irqsave(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600473 ++cb_data_ptr->event_tiocm;
474 cb_data_ptr->tiocm_meta = *(struct smux_meta_tiocm *)metadata;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600475 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600476 break;
477
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530478 case SMUX_LOCAL_CLOSED:
479 spin_lock_irqsave(&cb_data_ptr->lock, flags);
480 ++cb_data_ptr->event_local_closed;
481 cb_data_ptr->event_disconnected_ssr =
482 ((struct smux_meta_disconnected *)metadata)->is_ssr;
483 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
484 break;
485
486 case SMUX_REMOTE_CLOSED:
487 spin_lock_irqsave(&cb_data_ptr->lock, flags);
488 ++cb_data_ptr->event_remote_closed;
489 cb_data_ptr->event_disconnected_ssr =
490 ((struct smux_meta_disconnected *)metadata)->is_ssr;
491 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
492 break;
493
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600494 default:
495 pr_err("%s: unknown event %d\n", __func__, event);
496 };
497
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600498 spin_lock_irqsave(&cb_data_ptr->lock, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600499 ++cb_data_ptr->cb_count;
500 complete(&cb_data_ptr->cb_completion);
501 spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
502}
503
504/**
505 * Test Read/write usage.
506 *
507 * @buf Output buffer for failure/status messages
508 * @max Size of @buf
509 * @vectors Test vector data (must end with NULL item)
510 * @name Name of the test case for failure messages
511 *
512 * Perform a sanity test consisting of opening a port, writing test packet(s),
513 * reading the response(s), and closing the port.
514 *
515 * The port should already be configured to use either local or remote
516 * loopback.
517 */
518static int smux_ut_basic_core(char *buf, int max,
519 const struct test_vector *vectors,
520 const char *name)
521{
522 int i = 0;
523 int failed = 0;
524 static struct smux_mock_callback cb_data;
525 static int cb_initialized;
526 int ret;
527
528 if (!cb_initialized)
529 mock_cb_data_init(&cb_data);
530
531 mock_cb_data_reset(&cb_data);
532 while (!failed) {
533 struct mock_write_event *write_event;
534 struct mock_read_event *read_event;
535
536 /* open port */
537 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
538 get_rx_buffer);
539 UT_ASSERT_INT(ret, ==, 0);
540 UT_ASSERT_INT(
541 (int)wait_for_completion_timeout(
542 &cb_data.cb_completion, HZ), >, 0);
543 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
544 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
545 mock_cb_data_reset(&cb_data);
546
547 /* write, read, and verify the test vector data */
548 for (; vectors->data != NULL; ++vectors) {
549 const char *test_data = vectors->data;
550 const unsigned test_len = vectors->len;
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600551 unsigned long long start_t;
552 unsigned long long end_t;
553 unsigned long long val;
554 unsigned long rem;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600555
556 i += scnprintf(buf + i, max - i,
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600557 "Writing vector %p len %d: ",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600558 test_data, test_len);
559
560 /* write data */
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600561 start_t = sched_clock();
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600562 msm_smux_write(SMUX_TEST_LCID, (void *)0xCAFEFACE,
563 test_data, test_len);
564 UT_ASSERT_INT(ret, ==, 0);
565 UT_ASSERT_INT(
566 (int)wait_for_completion_timeout(
567 &cb_data.cb_completion, HZ), >, 0);
568
569 /* wait for write and echo'd read to complete */
570 INIT_COMPLETION(cb_data.cb_completion);
571 if (cb_data.cb_count < 2)
572 UT_ASSERT_INT(
573 (int)wait_for_completion_timeout(
574 &cb_data.cb_completion, HZ),
575 >, 0);
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600576 end_t = sched_clock();
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600577
578 UT_ASSERT_INT(cb_data.cb_count, >=, 1);
579 UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
580 UT_ASSERT_INT(list_empty(&cb_data.write_events), ==, 0);
581
582 write_event = list_first_entry(&cb_data.write_events,
583 struct mock_write_event, list);
584 UT_ASSERT_PTR(write_event->meta.pkt_priv, ==,
585 (void *)0xCAFEFACE);
586 UT_ASSERT_PTR(write_event->meta.buffer, ==,
587 (void *)test_data);
588 UT_ASSERT_INT(write_event->meta.len, ==, test_len);
589
590 /* verify read event */
591 UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
592 UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
593 read_event = list_first_entry(&cb_data.read_events,
594 struct mock_read_event, list);
595 UT_ASSERT_PTR(read_event->meta.pkt_priv, ==,
596 (void *)0x1234);
597 UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
598
599 if (read_event->meta.len != test_len ||
600 memcmp(read_event->meta.buffer,
601 test_data, test_len)) {
602 /* data mismatch */
603 char linebuff[80];
604
605 hex_dump_to_buffer(test_data, test_len,
606 16, 1, linebuff, sizeof(linebuff), 1);
607 i += scnprintf(buf + i, max - i,
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600608 "Failed\nExpected:\n%s\n\n", linebuff);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600609
610 hex_dump_to_buffer(read_event->meta.buffer,
611 read_event->meta.len,
612 16, 1, linebuff, sizeof(linebuff), 1);
613 i += scnprintf(buf + i, max - i,
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600614 "Failed\nActual:\n%s\n", linebuff);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600615 failed = 1;
616 break;
617 }
Eric Holmberg6966d5d2012-10-11 11:13:46 -0600618
619 /* calculate throughput stats */
620 val = end_t - start_t;
621 rem = do_div(val, 1000);
622 i += scnprintf(buf + i, max - i,
623 "OK - %u us",
624 (unsigned int)val);
625
626 val = 1000000000LL * 2 * test_len;
627 rem = do_div(val, end_t - start_t);
628 i += scnprintf(buf + i, max - i,
629 " (%u kB/sec)\n", (unsigned int)val);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600630 mock_cb_data_reset(&cb_data);
631 }
632
633 /* close port */
634 ret = msm_smux_close(SMUX_TEST_LCID);
635 UT_ASSERT_INT(ret, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530636 while (cb_data.cb_count < 3) {
637 UT_ASSERT_INT(
638 (int)wait_for_completion_timeout(
639 &cb_data.cb_completion, HZ),
640 >, 0);
641 INIT_COMPLETION(cb_data.cb_completion);
642 }
643 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600644 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
645 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530646 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
647 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600648 break;
649 }
650
651 if (!failed) {
652 i += scnprintf(buf + i, max - i, "\tOK\n");
653 } else {
654 pr_err("%s: Failed\n", name);
655 i += scnprintf(buf + i, max - i, "\tFailed\n");
656 i += mock_cb_data_print(&cb_data, buf + i, max - i);
657 msm_smux_close(SMUX_TEST_LCID);
658 }
659
660 mock_cb_data_reset(&cb_data);
661 return i;
662}
663
664/**
665 * Verify Basic Local Loopback Support
666 *
667 * Perform a sanity test consisting of opening a port in local loopback
668 * mode and writing a packet and reading the echo'd packet back.
669 */
670static int smux_ut_basic(char *buf, int max)
671{
672 const struct test_vector test_data[] = {
673 {"hello\0world\n", sizeof("hello\0world\n")},
674 {0, 0},
675 };
676 int i = 0;
677 int failed = 0;
678 int ret;
679
680 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
681 while (!failed) {
682 /* enable loopback mode */
683 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
684 SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
685 UT_ASSERT_INT(ret, ==, 0);
686
687 i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
688 break;
689 }
690
691 if (failed) {
692 pr_err("%s: Failed\n", __func__);
693 i += scnprintf(buf + i, max - i, "\tFailed\n");
694 }
695 return i;
696}
697
698/**
699 * Verify Basic Remote Loopback Support
700 *
701 * Perform a sanity test consisting of opening a port in remote loopback
702 * mode and writing a packet and reading the echo'd packet back.
703 */
704static int smux_ut_remote_basic(char *buf, int max)
705{
706 const struct test_vector test_data[] = {
707 {"hello\0world\n", sizeof("hello\0world\n")},
708 {0, 0},
709 };
710 int i = 0;
711 int failed = 0;
712 int ret;
713
714 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
715 while (!failed) {
716 /* enable remote mode */
717 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
718 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
719 UT_ASSERT_INT(ret, ==, 0);
720
721 i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
722 break;
723 }
724
725 if (failed) {
726 pr_err("%s: Failed\n", __func__);
727 i += scnprintf(buf + i, max - i, "\tFailed\n");
728 }
729 return i;
730}
731
732/**
Eric Holmberg06011322012-07-06 18:17:03 -0600733 * Verify Basic Subsystem Restart Support
734 *
735 * Run a basic loopback test followed by a subsystem restart and then another
736 * loopback test.
737 */
Eric Holmberg9c76e2e2013-05-21 18:08:06 -0600738static int smux_ut_ssr_remote_basic(char *buf, int max)
Eric Holmberg06011322012-07-06 18:17:03 -0600739{
740 const struct test_vector test_data[] = {
741 {"hello\0world\n", sizeof("hello\0world\n")},
742 {0, 0},
743 };
744 int i = 0;
745 int failed = 0;
Eric Holmberga43404f2012-08-28 15:31:01 -0600746 int retry_count = 0;
Eric Holmberg06011322012-07-06 18:17:03 -0600747 int ret;
748
749 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
750 while (!failed) {
751 /* enable remote mode */
752 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
753 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
754 UT_ASSERT_INT(ret, ==, 0);
755
756 i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
757 subsystem_restart("external_modem");
Eric Holmberga43404f2012-08-28 15:31:01 -0600758
759 do {
760 msleep(500);
761 ++retry_count;
762 UT_ASSERT_INT(retry_count, <, 20);
763 } while (!smux_remote_is_active() && !failed);
764
Eric Holmberg06011322012-07-06 18:17:03 -0600765 i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
766 break;
767 }
768
769 if (failed) {
770 pr_err("%s: Failed\n", __func__);
771 i += scnprintf(buf + i, max - i, "\tFailed\n");
772 }
773 return i;
774}
775
776/**
777 * Verify Subsystem Restart Support During Port Open
778 */
Eric Holmberg9c76e2e2013-05-21 18:08:06 -0600779static int smux_ut_ssr_remote_open(char *buf, int max)
Eric Holmberg06011322012-07-06 18:17:03 -0600780{
781 static struct smux_mock_callback cb_data;
782 static int cb_initialized;
783 int ret;
Eric Holmberga43404f2012-08-28 15:31:01 -0600784 int retry_count;
Eric Holmberg06011322012-07-06 18:17:03 -0600785 int i = 0;
786 int failed = 0;
787
788 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
789
790 if (!cb_initialized)
791 mock_cb_data_init(&cb_data);
792
793 mock_cb_data_reset(&cb_data);
794 while (!failed) {
795 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
796 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
797 UT_ASSERT_INT(ret, ==, 0);
798
799 /* open port */
800 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
801 get_rx_buffer);
802 UT_ASSERT_INT(ret, ==, 0);
803 UT_ASSERT_INT(
804 (int)wait_for_completion_timeout(
805 &cb_data.cb_completion, HZ), >, 0);
806 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
807 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
808 mock_cb_data_reset(&cb_data);
809
810 /* restart modem */
811 subsystem_restart("external_modem");
812
813 /* verify SSR events */
814 UT_ASSERT_INT(ret, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530815 while (cb_data.cb_count < 3) {
816 UT_ASSERT_INT(
817 (int)wait_for_completion_timeout(
818 &cb_data.cb_completion, 10*HZ),
819 >, 0);
820 INIT_COMPLETION(cb_data.cb_completion);
821 }
822 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
Eric Holmberg06011322012-07-06 18:17:03 -0600823 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
824 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530825 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
826 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
Eric Holmberg06011322012-07-06 18:17:03 -0600827 mock_cb_data_reset(&cb_data);
828
829 /* close port */
830 ret = msm_smux_close(SMUX_TEST_LCID);
831 UT_ASSERT_INT(ret, ==, 0);
Eric Holmberga43404f2012-08-28 15:31:01 -0600832
833 /* wait for remote side to finish booting */
834 retry_count = 0;
835 do {
836 msleep(500);
837 ++retry_count;
838 UT_ASSERT_INT(retry_count, <, 20);
839 } while (!smux_remote_is_active() && !failed);
Eric Holmberg06011322012-07-06 18:17:03 -0600840 break;
841 }
842
843 if (!failed) {
844 i += scnprintf(buf + i, max - i, "\tOK\n");
845 } else {
846 pr_err("%s: Failed\n", __func__);
847 i += scnprintf(buf + i, max - i, "\tFailed\n");
848 i += mock_cb_data_print(&cb_data, buf + i, max - i);
849 msm_smux_close(SMUX_TEST_LCID);
850 }
851
852 mock_cb_data_reset(&cb_data);
853
854 return i;
855}
856
857/**
858 * Verify get_rx_buffer callback retry doesn't livelock SSR
859 * until all RX Bufffer Retries have timed out.
860 *
861 * @buf Buffer for status message
862 * @max Size of buffer
863 *
864 * @returns Number of bytes written to @buf
865 */
Eric Holmberg9c76e2e2013-05-21 18:08:06 -0600866static int smux_ut_ssr_remote_rx_buff_retry(char *buf, int max)
Eric Holmberg06011322012-07-06 18:17:03 -0600867{
868 static struct smux_mock_callback cb_data;
869 static int cb_initialized;
870 int i = 0;
871 int failed = 0;
Eric Holmberga43404f2012-08-28 15:31:01 -0600872 int retry_count;
Eric Holmberg06011322012-07-06 18:17:03 -0600873 int ret;
874
875 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
876 pr_err("%s", buf);
877
878 if (!cb_initialized)
879 mock_cb_data_init(&cb_data);
880
881 mock_cb_data_reset(&cb_data);
882 while (!failed) {
883 /* open port for loopback */
884 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
885 SMUX_CH_OPTION_REMOTE_LOOPBACK,
886 0);
887 UT_ASSERT_INT(ret, ==, 0);
888
889 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
890 smux_mock_cb, get_rx_buffer_mock);
891 UT_ASSERT_INT(ret, ==, 0);
892 UT_ASSERT_INT(
893 (int)wait_for_completion_timeout(
894 &cb_data.cb_completion, HZ), >, 0);
895 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
896 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
897 mock_cb_data_reset(&cb_data);
898
899 /* Queue up an RX buffer retry */
900 get_rx_buffer_mock_fail = 1;
901 ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
902 test_array, sizeof(test_array));
903 UT_ASSERT_INT(ret, ==, 0);
904 while (!cb_data.get_rx_buff_retry_count) {
905 UT_ASSERT_INT(
906 (int)wait_for_completion_timeout(
907 &cb_data.cb_completion, HZ),
908 >, 0);
909 INIT_COMPLETION(cb_data.cb_completion);
910 }
911 if (failed)
912 break;
913 mock_cb_data_reset(&cb_data);
914
915 /* trigger SSR */
916 subsystem_restart("external_modem");
917
918 /* verify SSR completed */
Eric Holmberga43404f2012-08-28 15:31:01 -0600919 retry_count = 0;
920 while (cb_data.event_disconnected_ssr == 0) {
921 (void)wait_for_completion_timeout(
922 &cb_data.cb_completion, HZ);
923 INIT_COMPLETION(cb_data.cb_completion);
924 ++retry_count;
925 UT_ASSERT_INT(retry_count, <, 10);
926 }
927 if (failed)
928 break;
Eric Holmberg06011322012-07-06 18:17:03 -0600929 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
930 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +0530931 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
932 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
Eric Holmberg06011322012-07-06 18:17:03 -0600933 mock_cb_data_reset(&cb_data);
934
935 /* close port */
936 ret = msm_smux_close(SMUX_TEST_LCID);
937 UT_ASSERT_INT(ret, ==, 0);
Eric Holmberga43404f2012-08-28 15:31:01 -0600938
939 /* wait for remote side to finish booting */
940 retry_count = 0;
941 do {
942 msleep(500);
943 ++retry_count;
944 UT_ASSERT_INT(retry_count, <, 20);
945 } while (!smux_remote_is_active() && !failed);
Eric Holmberg06011322012-07-06 18:17:03 -0600946 break;
947 }
948
949 if (!failed) {
950 i += scnprintf(buf + i, max - i, "\tOK\n");
951 } else {
952 pr_err("%s: Failed\n", __func__);
953 i += scnprintf(buf + i, max - i, "\tFailed\n");
954 i += mock_cb_data_print(&cb_data, buf + i, max - i);
955 msm_smux_close(SMUX_TEST_LCID);
956 }
957 mock_cb_data_reset(&cb_data);
958 return i;
959}
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600960
Eric Holmberg06011322012-07-06 18:17:03 -0600961/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600962 * Fill test pattern into provided buffer including an optional
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600963 * redzone before and after the buffer.
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600964 *
965 * buf ---------
966 * redzone
967 * --------- <- returned pointer
968 * data
969 * --------- <- returned pointer + len
970 * redzone
971 * ---------
972 *
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600973 * @buf Pointer to the buffer of size len or len+2*RED_ZONE_SIZE (redzone)
974 * @len Length of the *data* buffer (excluding the extra redzone buffers)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600975 * @redzone If true, adds redzone data
976 *
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600977 * @returns pointer to buffer (buf + RED_ZONE_SIZE if redzone enabled)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600978 */
Eric Holmberg837a9342012-10-29 14:12:34 -0600979static uint8_t *test_pattern_fill(char *buf, int len, int redzone)
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600980{
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600981 char *buf_ptr;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600982 uint8_t ch;
983
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600984 if (redzone) {
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600985 memset(buf, RED_ZONE_PRE_CH, RED_ZONE_SIZE);
986 buf += RED_ZONE_SIZE;
987 memset(buf + len, RED_ZONE_POS_CH, RED_ZONE_SIZE);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600988 }
989
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600990 for (ch = 0, buf_ptr = buf; len > 0; --len, ++ch)
991 *buf_ptr++ = (char)ch;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600992
Eric Holmberg0b2c02a2013-04-09 11:56:53 -0600993 return buf;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600994}
995
996/**
997 * Verify test pattern generated by test_pattern_fill.
998 *
999 * @buf_ptr Pointer to buffer pointer
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001000 * @len Length of the *data* buffer (excluding redzone bytes)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001001 * @redzone If true, verifies redzone and adjusts *buf_ptr
1002 * @errmsg Buffer for error message
1003 * @errmsg_max Size of error message buffer
1004 *
1005 * @returns 0 for success; length of error message otherwise
1006 */
Eric Holmberg837a9342012-10-29 14:12:34 -06001007static unsigned test_pattern_verify(char **buf_ptr, int len, int redzone,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001008 char *errmsg, int errmsg_max)
1009{
1010 int n;
1011 int i = 0;
1012 char linebuff[80];
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001013 char *zone_ptr;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001014
1015 if (redzone) {
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001016 *buf_ptr -= RED_ZONE_SIZE;
1017 zone_ptr = *buf_ptr;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001018
1019 /* verify prefix redzone */
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001020 for (n = 0; n < RED_ZONE_SIZE; ++n) {
1021 if (zone_ptr[n] != RED_ZONE_PRE_CH) {
1022 hex_dump_to_buffer(zone_ptr, RED_ZONE_SIZE,
1023 RED_ZONE_SIZE, 1, linebuff,
1024 sizeof(linebuff), 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001025 i += scnprintf(errmsg + i, errmsg_max - i,
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001026 "Pre-redzone violation: %s\n",
1027 linebuff);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001028 break;
1029 }
1030 }
1031
1032 /* verify postfix redzone */
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001033 zone_ptr = *buf_ptr + RED_ZONE_SIZE + len;
1034 for (n = 0; n < RED_ZONE_SIZE; ++n) {
1035 if (zone_ptr[n] != RED_ZONE_POS_CH) {
1036 hex_dump_to_buffer(zone_ptr, RED_ZONE_SIZE,
1037 RED_ZONE_SIZE, 1, linebuff,
1038 sizeof(linebuff), 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001039 i += scnprintf(errmsg + i, errmsg_max - i,
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001040 "Post-redzone violation: %s\n",
1041 linebuff);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001042 break;
1043 }
1044 }
1045 }
1046 return i;
1047}
1048
1049/**
1050 * Write a multiple packets in ascending size and verify packet is received
1051 * correctly.
1052 *
1053 * @buf Buffer for status message
1054 * @max Size of buffer
1055 * @name Name of the test for error reporting
1056 *
1057 * @returns Number of bytes written to @buf
1058 *
1059 * Requires that the port already be opened and loopback mode is
1060 * configured correctly (if required).
1061 */
1062static int smux_ut_loopback_big_pkt(char *buf, int max, const char *name)
1063{
1064 struct test_vector test_data[] = {
1065 {0, 64},
1066 {0, 128},
1067 {0, 256},
1068 {0, 512},
1069 {0, 1024},
Eric Holmberg6966d5d2012-10-11 11:13:46 -06001070 {0, 1500},
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001071 {0, 2048},
1072 {0, 4096},
1073 {0, 0},
1074 };
1075 int i = 0;
1076 int failed = 0;
1077 struct test_vector *tv;
1078
1079 /* generate test data */
1080 for (tv = test_data; tv->len > 0; ++tv) {
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001081 tv->data = kmalloc(tv->len + 2 * RED_ZONE_SIZE, GFP_KERNEL);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001082 if (!tv->data) {
1083 i += scnprintf(buf + i, max - i,
1084 "%s: Unable to allocate %d bytes\n",
1085 __func__, tv->len);
1086 failed = 1;
1087 goto out;
1088 }
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001089 tv->data = test_pattern_fill((uint8_t *)tv->data, tv->len, 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001090 }
1091
1092 /* run test */
1093 i += scnprintf(buf + i, max - i, "Running %s\n", name);
1094 while (!failed) {
1095 i += smux_ut_basic_core(buf + i, max - i, test_data, name);
1096 break;
1097 }
1098
1099out:
1100 if (failed) {
1101 pr_err("%s: Failed\n", name);
1102 i += scnprintf(buf + i, max - i, "\tFailed\n");
1103 }
1104
1105 for (tv = test_data; tv->len > 0; ++tv) {
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001106 if (tv->data) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001107 i += test_pattern_verify((char **)&tv->data,
1108 tv->len, 1, buf + i, max - i);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001109 kfree(tv->data);
1110 }
1111 }
1112
1113 return i;
1114}
1115
1116/**
1117 * Verify Large-packet Local Loopback Support.
1118 *
1119 * @buf Buffer for status message
1120 * @max Size of buffer
1121 *
1122 * @returns Number of bytes written to @buf
1123 *
1124 * Open port in local loopback mode and write a multiple packets in ascending
1125 * size and verify packet is received correctly.
1126 */
1127static int smux_ut_local_big_pkt(char *buf, int max)
1128{
1129 int i = 0;
1130 int ret;
1131
1132 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1133 SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
1134
1135 if (ret == 0) {
1136 smux_byte_loopback = SMUX_TEST_LCID;
1137 i += smux_ut_loopback_big_pkt(buf, max, __func__);
1138 smux_byte_loopback = 0;
1139 } else {
1140 i += scnprintf(buf + i, max - i,
1141 "%s: Unable to set loopback mode\n",
1142 __func__);
1143 }
1144
1145 return i;
1146}
1147
1148/**
1149 * Verify Large-packet Remote Loopback Support.
1150 *
1151 * @buf Buffer for status message
1152 * @max Size of buffer
1153 *
1154 * @returns Number of bytes written to @buf
1155 *
1156 * Open port in remote loopback mode and write a multiple packets in ascending
1157 * size and verify packet is received correctly.
1158 */
1159static int smux_ut_remote_big_pkt(char *buf, int max)
1160{
1161 int i = 0;
1162 int ret;
1163
1164 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1165 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
1166 if (ret == 0) {
1167 i += smux_ut_loopback_big_pkt(buf, max, __func__);
1168 } else {
1169 i += scnprintf(buf + i, max - i,
1170 "%s: Unable to set loopback mode\n",
1171 __func__);
1172 }
1173
1174 return i;
1175}
1176
1177/**
Eric Holmberg6966d5d2012-10-11 11:13:46 -06001178 * Run a large packet test for throughput metrics.
1179 *
1180 * Repeatedly send a packet for 100 iterations to get throughput metrics.
1181 */
1182static int smux_ut_remote_throughput(char *buf, int max)
1183{
1184 struct test_vector test_data[] = {
1185 {0, 1500},
1186 {0, 0},
1187 };
1188 int failed = 0;
1189 int i = 0;
1190 int loop = 0;
1191 struct test_vector *tv;
1192 int ret;
1193
1194 /* generate test data */
1195 for (tv = test_data; tv->len > 0; ++tv) {
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001196 tv->data = kmalloc(tv->len, GFP_KERNEL);
Eric Holmberg6966d5d2012-10-11 11:13:46 -06001197 if (!tv->data) {
1198 i += scnprintf(buf + i, max - i,
1199 "%s: Unable to allocate %d bytes\n",
1200 __func__, tv->len);
1201 failed = 1;
1202 goto out;
1203 }
Eric Holmberg0b2c02a2013-04-09 11:56:53 -06001204 test_pattern_fill((uint8_t *)tv->data, tv->len, 0);
Eric Holmberg6966d5d2012-10-11 11:13:46 -06001205 }
1206
1207 /* run test */
1208 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
1209 while (!failed && loop < 100) {
1210 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1211 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
1212 UT_ASSERT_INT(ret, ==, 0);
1213
1214 i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
1215 ++loop;
1216 }
1217
1218out:
1219 if (failed) {
1220 pr_err("%s: Failed\n", __func__);
1221 i += scnprintf(buf + i, max - i, "\tFailed\n");
1222 }
1223
1224 for (tv = test_data; tv->len > 0; ++tv)
1225 kfree(tv->data);
1226
1227 return i;
1228}
1229
1230/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001231 * Verify set and get operations for each TIOCM bit.
1232 *
1233 * @buf Buffer for status message
1234 * @max Size of buffer
1235 * @name Name of the test for error reporting
1236 *
1237 * @returns Number of bytes written to @buf
1238 */
1239static int smux_ut_tiocm(char *buf, int max, const char *name)
1240{
1241 static struct smux_mock_callback cb_data;
1242 static int cb_initialized;
1243 static const struct tiocm_test_vector tiocm_vectors[] = {
1244 /* bit to set, set old, set new, clear old */
1245 {TIOCM_DTR, TIOCM_DTR, TIOCM_DTR | TIOCM_DSR, TIOCM_DSR},
1246 {TIOCM_RTS, TIOCM_RTS, TIOCM_RTS | TIOCM_CTS, TIOCM_CTS},
1247 {TIOCM_RI, 0x0, TIOCM_RI, TIOCM_RI},
1248 {TIOCM_CD, 0x0, TIOCM_CD, TIOCM_CD},
1249 };
1250 int i = 0;
1251 int failed = 0;
1252 int n;
1253 int ret;
1254
1255 i += scnprintf(buf + i, max - i, "Running %s\n", name);
1256
1257 if (!cb_initialized)
1258 mock_cb_data_init(&cb_data);
1259
1260 mock_cb_data_reset(&cb_data);
1261 while (!failed) {
1262 /* open port */
1263 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
1264 get_rx_buffer);
1265 UT_ASSERT_INT(ret, ==, 0);
1266 UT_ASSERT_INT(
1267 (int)wait_for_completion_timeout(
1268 &cb_data.cb_completion, HZ), >, 0);
1269 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
1270 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
1271 mock_cb_data_reset(&cb_data);
1272
1273 /* set and clear each TIOCM bit */
1274 for (n = 0; n < ARRAY_SIZE(tiocm_vectors) && !failed; ++n) {
1275 /* set signal and verify */
1276 ret = msm_smux_tiocm_set(SMUX_TEST_LCID,
1277 tiocm_vectors[n].input, 0x0);
1278 UT_ASSERT_INT(ret, ==, 0);
1279 UT_ASSERT_INT(
1280 (int)wait_for_completion_timeout(
1281 &cb_data.cb_completion, HZ), >, 0);
1282 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
1283 UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
1284 UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
1285 tiocm_vectors[n].set_old);
1286 UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==,
1287 tiocm_vectors[n].set_new);
1288 mock_cb_data_reset(&cb_data);
1289
1290 /* clear signal and verify */
1291 ret = msm_smux_tiocm_set(SMUX_TEST_LCID, 0x0,
1292 tiocm_vectors[n].input);
1293 UT_ASSERT_INT(ret, ==, 0);
1294 UT_ASSERT_INT(
1295 (int)wait_for_completion_timeout(
1296 &cb_data.cb_completion, HZ),
1297 >, 0);
1298 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
1299 UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
1300 UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
1301 tiocm_vectors[n].clr_old);
1302 UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==, 0x0);
1303 mock_cb_data_reset(&cb_data);
1304 }
1305 if (failed)
1306 break;
1307
1308 /* close port */
1309 ret = msm_smux_close(SMUX_TEST_LCID);
1310 UT_ASSERT_INT(ret, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +05301311 while (cb_data.cb_count < 3) {
1312 UT_ASSERT_INT(
1313 (int)wait_for_completion_timeout(
1314 &cb_data.cb_completion, HZ),
1315 >, 0);
1316 INIT_COMPLETION(cb_data.cb_completion);
1317 }
1318 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001319 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
1320 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +05301321 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
1322 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001323 break;
1324 }
1325
1326 if (!failed) {
1327 i += scnprintf(buf + i, max - i, "\tOK\n");
1328 } else {
1329 pr_err("%s: Failed\n", name);
1330 i += scnprintf(buf + i, max - i, "\tFailed\n");
1331 i += mock_cb_data_print(&cb_data, buf + i, max - i);
1332 msm_smux_close(SMUX_TEST_LCID);
1333 }
1334
1335 mock_cb_data_reset(&cb_data);
1336 return i;
1337}
1338
1339/**
1340 * Verify TIOCM Status Bits for local loopback.
1341 *
1342 * @buf Buffer for status message
1343 * @max Size of buffer
1344 *
1345 * @returns Number of bytes written to @buf
1346 */
1347static int smux_ut_local_tiocm(char *buf, int max)
1348{
1349 int i = 0;
1350 int ret;
1351
1352 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1353 SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
1354
1355 if (ret == 0) {
1356 smux_byte_loopback = SMUX_TEST_LCID;
1357 i += smux_ut_tiocm(buf, max, __func__);
1358 smux_byte_loopback = 0;
1359 } else {
1360 i += scnprintf(buf + i, max - i,
1361 "%s: Unable to set loopback mode\n",
1362 __func__);
1363 }
1364
1365 return i;
1366}
1367
1368/**
1369 * Verify TIOCM Status Bits for remote loopback.
1370 *
1371 * @buf Buffer for status message
1372 * @max Size of buffer
1373 *
1374 * @returns Number of bytes written to @buf
1375 */
1376static int smux_ut_remote_tiocm(char *buf, int max)
1377{
1378 int i = 0;
1379 int ret;
1380
1381 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1382 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
1383 if (ret == 0) {
1384 i += smux_ut_tiocm(buf, max, __func__);
1385 } else {
1386 i += scnprintf(buf + i, max - i,
1387 "%s: Unable to set loopback mode\n",
1388 __func__);
1389 }
1390
1391 return i;
1392}
1393
1394/**
1395 * Verify High/Low Watermark notifications.
1396 *
1397 * @buf Buffer for status message
1398 * @max Size of buffer
1399 *
1400 * @returns Number of bytes written to @buf
1401 */
1402static int smux_ut_local_wm(char *buf, int max)
1403{
1404 static struct smux_mock_callback cb_data;
1405 static int cb_initialized;
1406 int i = 0;
1407 int failed = 0;
1408 int ret;
1409
1410 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
1411 pr_err("%s", buf);
1412
1413 if (!cb_initialized)
1414 mock_cb_data_init(&cb_data);
1415
1416 mock_cb_data_reset(&cb_data);
1417 smux_byte_loopback = SMUX_TEST_LCID;
1418 while (!failed) {
1419 /* open port for loopback with TX disabled */
1420 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1421 SMUX_CH_OPTION_LOCAL_LOOPBACK
1422 | SMUX_CH_OPTION_REMOTE_TX_STOP,
1423 0);
1424 UT_ASSERT_INT(ret, ==, 0);
1425
1426 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
1427 get_rx_buffer);
1428 UT_ASSERT_INT(ret, ==, 0);
1429 UT_ASSERT_INT(
1430 (int)wait_for_completion_timeout(
1431 &cb_data.cb_completion, HZ), >, 0);
1432 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
1433 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
1434 mock_cb_data_reset(&cb_data);
1435
1436 /* transmit 4 packets and verify high-watermark notification */
1437 ret = 0;
1438 ret |= msm_smux_write(SMUX_TEST_LCID, (void *)1,
1439 test_array, sizeof(test_array));
1440 ret |= msm_smux_write(SMUX_TEST_LCID, (void *)2,
1441 test_array, sizeof(test_array));
1442 ret |= msm_smux_write(SMUX_TEST_LCID, (void *)3,
1443 test_array, sizeof(test_array));
1444 UT_ASSERT_INT(ret, ==, 0);
1445 UT_ASSERT_INT(cb_data.cb_count, ==, 0);
1446 UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
1447
1448 ret = msm_smux_write(SMUX_TEST_LCID, (void *)4,
1449 test_array, sizeof(test_array));
1450 UT_ASSERT_INT(ret, ==, 0);
1451 UT_ASSERT_INT(
1452 (int)wait_for_completion_timeout(
1453 &cb_data.cb_completion, HZ),
1454 >, 0);
1455 UT_ASSERT_INT(cb_data.event_high_wm, ==, 1);
1456 UT_ASSERT_INT(cb_data.event_low_wm, ==, 0);
1457 mock_cb_data_reset(&cb_data);
1458
1459 /* exceed watermark and verify failure return value */
1460 ret = msm_smux_write(SMUX_TEST_LCID, (void *)5,
1461 test_array, sizeof(test_array));
1462 UT_ASSERT_INT(ret, ==, -EAGAIN);
1463
1464 /* re-enable TX and verify low-watermark notification */
1465 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1466 0, SMUX_CH_OPTION_REMOTE_TX_STOP);
1467 UT_ASSERT_INT(ret, ==, 0);
1468 while (cb_data.cb_count < 9) {
1469 UT_ASSERT_INT(
1470 (int)wait_for_completion_timeout(
1471 &cb_data.cb_completion, HZ),
1472 >, 0);
1473 INIT_COMPLETION(cb_data.cb_completion);
1474 }
1475 if (failed)
1476 break;
1477
1478 UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
1479 UT_ASSERT_INT(cb_data.event_low_wm, ==, 1);
1480 UT_ASSERT_INT(cb_data.event_write_done, ==, 4);
1481 mock_cb_data_reset(&cb_data);
1482
1483 /* close port */
1484 ret = msm_smux_close(SMUX_TEST_LCID);
1485 UT_ASSERT_INT(ret, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +05301486 while (cb_data.cb_count < 3) {
1487 UT_ASSERT_INT(
1488 (int)wait_for_completion_timeout(
1489 &cb_data.cb_completion, HZ),
1490 >, 0);
1491 INIT_COMPLETION(cb_data.cb_completion);
1492 }
1493 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001494 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
1495 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +05301496 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
1497 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001498 break;
1499 }
1500
1501 if (!failed) {
1502 i += scnprintf(buf + i, max - i, "\tOK\n");
1503 } else {
1504 pr_err("%s: Failed\n", __func__);
1505 i += scnprintf(buf + i, max - i, "\tFailed\n");
1506 i += mock_cb_data_print(&cb_data, buf + i, max - i);
1507 msm_smux_close(SMUX_TEST_LCID);
1508 }
1509 smux_byte_loopback = 0;
1510 mock_cb_data_reset(&cb_data);
1511 return i;
1512}
1513
1514/**
1515 * Verify smuxld_receive_buf regular and error processing.
1516 *
1517 * @buf Buffer for status message
1518 * @max Size of buffer
1519 *
1520 * @returns Number of bytes written to @buf
1521 */
1522static int smux_ut_local_smuxld_receive_buf(char *buf, int max)
1523{
1524 static struct smux_mock_callback cb_data;
1525 static int cb_initialized;
1526 struct mock_read_event *meta;
1527 int i = 0;
1528 int failed = 0;
1529 int ret;
1530 char data[] = {SMUX_UT_ECHO_REQ,
1531 SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ,
1532 };
1533 char flags[] = {0x0, 0x1, 0x0,};
1534
1535
1536 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
1537
1538 if (!cb_initialized)
1539 mock_cb_data_init(&cb_data);
1540
1541 mock_cb_data_reset(&cb_data);
1542 smux_byte_loopback = SMUX_TEST_LCID;
1543 while (!failed) {
1544 /* open port for loopback */
1545 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1546 SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
1547 UT_ASSERT_INT(ret, ==, 0);
1548
1549 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
1550 get_rx_buffer);
1551 UT_ASSERT_INT(ret, ==, 0);
1552 UT_ASSERT_INT(
1553 (int)wait_for_completion_timeout(
1554 &cb_data.cb_completion, HZ), >, 0);
1555 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
1556 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
1557 mock_cb_data_reset(&cb_data);
1558
1559 /*
1560 * Verify RX error processing by sending 3 echo requests:
1561 * one OK, one fail, and a final OK
1562 *
1563 * The parsing framework should process the requests
1564 * and send us three BYTE command packets with
1565 * ECHO ACK FAIL and ECHO ACK OK characters.
1566 */
1567 smuxld_receive_buf(0, data, flags, sizeof(data));
1568
1569 /* verify response characters */
1570 do {
1571 UT_ASSERT_INT(
1572 (int)wait_for_completion_timeout(
1573 &cb_data.cb_completion, HZ), >, 0);
1574 INIT_COMPLETION(cb_data.cb_completion);
1575 } while (cb_data.cb_count < 3);
1576 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
1577 UT_ASSERT_INT(cb_data.event_read_done, ==, 3);
1578
1579 meta = list_first_entry(&cb_data.read_events,
1580 struct mock_read_event, list);
1581 UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
1582 SMUX_UT_ECHO_ACK_OK);
1583 list_del(&meta->list);
1584
1585 meta = list_first_entry(&cb_data.read_events,
1586 struct mock_read_event, list);
1587 UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
1588 SMUX_UT_ECHO_ACK_FAIL);
1589 list_del(&meta->list);
1590
1591 meta = list_first_entry(&cb_data.read_events,
1592 struct mock_read_event, list);
1593 UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
1594 SMUX_UT_ECHO_ACK_OK);
1595 list_del(&meta->list);
1596 mock_cb_data_reset(&cb_data);
1597
1598 /* close port */
1599 ret = msm_smux_close(SMUX_TEST_LCID);
1600 UT_ASSERT_INT(ret, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +05301601 while (cb_data.cb_count < 3) {
1602 UT_ASSERT_INT(
1603 (int)wait_for_completion_timeout(
1604 &cb_data.cb_completion, HZ),
1605 >, 0);
1606 INIT_COMPLETION(cb_data.cb_completion);
1607 }
1608 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001609 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
1610 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
Arun Kumar Neelakantam9fdf0172013-07-09 15:55:32 +05301611 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
1612 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001613 break;
1614 }
1615
1616 if (!failed) {
1617 i += scnprintf(buf + i, max - i, "\tOK\n");
1618 } else {
1619 pr_err("%s: Failed\n", __func__);
1620 i += scnprintf(buf + i, max - i, "\tFailed\n");
1621 i += mock_cb_data_print(&cb_data, buf + i, max - i);
1622 msm_smux_close(SMUX_TEST_LCID);
1623 }
1624 smux_byte_loopback = 0;
1625 mock_cb_data_reset(&cb_data);
1626 return i;
1627}
1628
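/*
 * Editorial sketch (not part of the original tests): smuxld_receive_buf()
 * follows the TTY line-discipline receive contract in which the flag array
 * describes the byte at the same index, and a non-zero flag marks that byte
 * as a receive error.  The helper below, whose name is hypothetical,
 * illustrates the "good, bad, good" sequence driven by the test above.
 */
static void smux_ut_feed_good_bad_good(void)
{
	char data[] = {SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ};
	char flags[] = {0x0, 0x1, 0x0};	/* middle byte flagged as an error */

	/* byte loopback turns these into ACK OK / ACK FAIL / ACK OK replies */
	smuxld_receive_buf(0, data, flags, sizeof(data));
}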
1629/**
1630 * Allocates a new buffer or returns a failure based upon the
1631 * global @get_rx_buffer_mock_fail.
1632 */
1633static int get_rx_buffer_mock(void *priv, void **pkt_priv,
1634 void **buffer, int size)
1635{
1636 void *rx_buf;
1637 unsigned long flags;
1638 struct smux_mock_callback *cb_ptr;
1639
1640 cb_ptr = (struct smux_mock_callback *)priv;
1641 if (!cb_ptr) {
1642 pr_err("%s: no callback data\n", __func__);
1643 return -ENXIO;
1644 }
1645
1646 if (get_rx_buffer_mock_fail) {
1647 /* force failure and log failure event */
1648 struct mock_get_rx_buff_event *meta;
1649 meta = kmalloc(sizeof(struct mock_get_rx_buff_event),
1650 GFP_KERNEL);
1651 if (!meta) {
1652 pr_err("%s: unable to allocate metadata\n", __func__);
1653 return -ENOMEM;
1654 }
1655 INIT_LIST_HEAD(&meta->list);
1656 meta->size = size;
1657 meta->jiffies = jiffies;
1658
1659 spin_lock_irqsave(&cb_ptr->lock, flags);
1660 ++cb_ptr->get_rx_buff_retry_count;
1661 list_add_tail(&meta->list, &cb_ptr->get_rx_buff_retry_events);
1662 ++cb_ptr->cb_count;
1663 complete(&cb_ptr->cb_completion);
1664 spin_unlock_irqrestore(&cb_ptr->lock, flags);
1665 return -EAGAIN;
1666 } else {
1667 rx_buf = kmalloc(size, GFP_KERNEL);
		if (!rx_buf) {
			pr_err("%s: unable to allocate rx buffer\n",
								__func__);
			return -ENOMEM;
		}
1668 *pkt_priv = (void *)0x1234;
1669 *buffer = rx_buf;
1670 return 0;
1671 }
1672 return 0;
1673}
1674
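/*
 * For contrast with the failure-injecting mock above, a minimal
 * get_rx_buffer-style callback normally just allocates the requested
 * buffer and fills in the output parameters.  This is an editorial sketch
 * with a hypothetical name, not the get_rx_buffer() used by the other
 * tests in this file.
 */
static int get_rx_buffer_simple_sketch(void *priv, void **pkt_priv,
				void **buffer, int size)
{
	void *rx_buf;

	rx_buf = kmalloc(size, GFP_KERNEL);
	if (!rx_buf)
		return -EAGAIN;	/* ask the smux core to retry the allocation */

	*pkt_priv = NULL;	/* opaque value returned in the read metadata */
	*buffer = rx_buf;
	return 0;
}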
1675/**
1676 * Verify get_rx_buffer callback retry.
1677 *
1678 * @buf Buffer for status message
1679 * @max Size of buffer
1680 *
1681 * @returns Number of bytes written to @buf
1682 */
1683static int smux_ut_local_get_rx_buff_retry(char *buf, int max)
1684{
1685 static struct smux_mock_callback cb_data;
1686 static int cb_initialized;
1687 int i = 0;
1688 int failed = 0;
1689 char try_two[] = "try 2";
1690 int ret;
1691 unsigned long start_j;
1692 struct mock_get_rx_buff_event *event;
1693 struct mock_read_event *read_event;
1694 int try;
1695
1696 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
1697 pr_err("%s", buf);
1698
1699 if (!cb_initialized)
1700 mock_cb_data_init(&cb_data);
1701
1702 mock_cb_data_reset(&cb_data);
1703 smux_byte_loopback = SMUX_TEST_LCID;
1704 while (!failed) {
1705 /* open port for loopback */
1706 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1707 SMUX_CH_OPTION_LOCAL_LOOPBACK,
1708 SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
1709 UT_ASSERT_INT(ret, ==, 0);
1710
1711 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
1712 smux_mock_cb, get_rx_buffer_mock);
1713 UT_ASSERT_INT(ret, ==, 0);
1714 UT_ASSERT_INT(
1715 (int)wait_for_completion_timeout(
1716 &cb_data.cb_completion, HZ), >, 0);
1717 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
1718 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
1719 mock_cb_data_reset(&cb_data);
1720
1721 /*
1722 * Force get_rx_buffer failure for a single RX packet
1723 *
1724 * The get_rx_buffer calls should follow an exponential
1725 * back-off with a maximum timeout of 1024 ms after which we
1726 * will get a failure notification.
1727 *
1728 * Try Post Delay (ms)
1729 * 0 -
1730 * 1 1
1731 * 2 2
1732 * 3 4
1733 * 4 8
1734 * 5 16
1735 * 6 32
1736 * 7 64
1737 * 8 128
1738 * 9 256
1739 * 10 512
1740 * 11 1024
1741 * 12 Fail
1742 *
1743 * All times are limited by the precision of the timer
1744 * framework, so ranges are used in the test
1745 * verification.
1746 */
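		/*
		 * Editorial note: equivalently, the post-try delay for
		 * retry n (n >= 1) is 1 << (n - 1) ms; once the 1024 ms
		 * delay has been exhausted, the next attempt gives up and
		 * reports a read failure instead of retrying again.
		 */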
1747 get_rx_buffer_mock_fail = 1;
1748 start_j = jiffies;
1749 ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
1750 test_array, sizeof(test_array));
1751 UT_ASSERT_INT(ret, ==, 0);
1752 ret = msm_smux_write(SMUX_TEST_LCID, (void *)2,
1753 try_two, sizeof(try_two));
1754 UT_ASSERT_INT(ret, ==, 0);
1755
1756 /* wait for RX failure event */
1757 while (cb_data.event_read_failed == 0) {
1758 UT_ASSERT_INT(
1759 (int)wait_for_completion_timeout(
1760 &cb_data.cb_completion, 2*HZ),
1761 >, 0);
1762 INIT_COMPLETION(cb_data.cb_completion);
1763 }
1764 if (failed)
1765 break;
1766
1767 /* verify retry attempts */
1768 UT_ASSERT_INT(cb_data.get_rx_buff_retry_count, ==, 12);
1769 event = list_first_entry(&cb_data.get_rx_buff_retry_events,
1770 struct mock_get_rx_buff_event, list);
1771 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1772 jiffies_to_msecs(event->jiffies - start_j));
1773 UT_ASSERT_INT_IN_RANGE(
1774 jiffies_to_msecs(event->jiffies - start_j),
1775 0, 0 + 20);
1776 start_j = event->jiffies;
1777
1778 event = list_first_entry(&event->list,
1779 struct mock_get_rx_buff_event, list);
1780 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1781 jiffies_to_msecs(event->jiffies - start_j));
1782 UT_ASSERT_INT_IN_RANGE(
1783 jiffies_to_msecs(event->jiffies - start_j),
1784 1, 1 + 20);
1785 start_j = event->jiffies;
1786
1787 event = list_first_entry(&event->list,
1788 struct mock_get_rx_buff_event, list);
1789 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1790 jiffies_to_msecs(event->jiffies - start_j));
1791 UT_ASSERT_INT_IN_RANGE(
1792 jiffies_to_msecs(event->jiffies - start_j),
1793 2, 2 + 20);
1794 start_j = event->jiffies;
1795
1796 event = list_first_entry(&event->list,
1797 struct mock_get_rx_buff_event, list);
1798 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1799 jiffies_to_msecs(event->jiffies - start_j));
1800 UT_ASSERT_INT_IN_RANGE(
1801 jiffies_to_msecs(event->jiffies - start_j),
1802 4, 4 + 20);
1803 start_j = event->jiffies;
1804
1805 event = list_first_entry(&event->list,
1806 struct mock_get_rx_buff_event, list);
1807 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1808 jiffies_to_msecs(event->jiffies - start_j));
1809 UT_ASSERT_INT_IN_RANGE(
1810 jiffies_to_msecs(event->jiffies - start_j),
1811 8, 8 + 20);
1812 start_j = event->jiffies;
1813
1814 event = list_first_entry(&event->list,
1815 struct mock_get_rx_buff_event, list);
1816 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1817 jiffies_to_msecs(event->jiffies - start_j));
1818 UT_ASSERT_INT_IN_RANGE(
1819 jiffies_to_msecs(event->jiffies - start_j),
1820 16, 16 + 20);
1821 start_j = event->jiffies;
1822
1823 event = list_first_entry(&event->list,
1824 struct mock_get_rx_buff_event, list);
1825 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1826 jiffies_to_msecs(event->jiffies - start_j));
1827 UT_ASSERT_INT_IN_RANGE(
1828 jiffies_to_msecs(event->jiffies - start_j),
1829 32 - 20, 32 + 20);
1830 start_j = event->jiffies;
1831
1832 event = list_first_entry(&event->list,
1833 struct mock_get_rx_buff_event, list);
1834 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1835 jiffies_to_msecs(event->jiffies - start_j));
1836 UT_ASSERT_INT_IN_RANGE(
1837 jiffies_to_msecs(event->jiffies - start_j),
1838 64 - 20, 64 + 20);
1839 start_j = event->jiffies;
1840
1841 event = list_first_entry(&event->list,
1842 struct mock_get_rx_buff_event, list);
1843 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1844 jiffies_to_msecs(event->jiffies - start_j));
1845 UT_ASSERT_INT_IN_RANGE(
1846 jiffies_to_msecs(event->jiffies - start_j),
1847 128 - 20, 128 + 20);
1848 start_j = event->jiffies;
1849
1850 event = list_first_entry(&event->list,
1851 struct mock_get_rx_buff_event, list);
1852 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1853 jiffies_to_msecs(event->jiffies - start_j));
1854 UT_ASSERT_INT_IN_RANGE(
1855 jiffies_to_msecs(event->jiffies - start_j),
1856 256 - 20, 256 + 20);
1857 start_j = event->jiffies;
1858
1859 event = list_first_entry(&event->list,
1860 struct mock_get_rx_buff_event, list);
1861 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1862 jiffies_to_msecs(event->jiffies - start_j));
1863 UT_ASSERT_INT_IN_RANGE(
1864 jiffies_to_msecs(event->jiffies - start_j),
1865 512 - 20, 512 + 20);
1866 start_j = event->jiffies;
1867
1868 event = list_first_entry(&event->list,
1869 struct mock_get_rx_buff_event, list);
1870 pr_err("%s: event->jiffies = %d (ms)\n", __func__,
1871 jiffies_to_msecs(event->jiffies - start_j));
1872 UT_ASSERT_INT_IN_RANGE(
1873 jiffies_to_msecs(event->jiffies - start_j),
1874 1024 - 20, 1024 + 20);
1875 mock_cb_data_reset(&cb_data);
1876
1877 /* verify 2nd pending RX packet goes through */
1878 get_rx_buffer_mock_fail = 0;
1879 INIT_COMPLETION(cb_data.cb_completion);
1880 if (cb_data.event_read_done == 0)
1881 UT_ASSERT_INT(
1882 (int)wait_for_completion_timeout(
1883 &cb_data.cb_completion, HZ),
1884 >, 0);
1885 UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
1886 UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
1887 read_event = list_first_entry(&cb_data.read_events,
1888 struct mock_read_event, list);
1889 UT_ASSERT_PTR(read_event->meta.pkt_priv, ==, (void *)0x1234);
1890 UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
1891 UT_ASSERT_INT(0, ==, memcmp(read_event->meta.buffer, try_two,
1892 sizeof(try_two)));
1893 mock_cb_data_reset(&cb_data);
1894
1895 /* Test maximum retry queue size */
1896 get_rx_buffer_mock_fail = 1;
1897 for (try = 0; try < (SMUX_RX_RETRY_MAX_PKTS + 1); ++try) {
1898 mock_cb_data_reset(&cb_data);
1899 ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
1900 test_array, sizeof(test_array));
1901 UT_ASSERT_INT(ret, ==, 0);
1902 UT_ASSERT_INT(
1903 (int)wait_for_completion_timeout(
1904 &cb_data.cb_completion, HZ),
1905 >, 0);
1906 }
1907
1908 /* should have SMUX_RX_RETRY_MAX_PKTS successful rx packets and 1 failed */
1909 while (cb_data.event_read_failed == 0) {
1910 UT_ASSERT_INT(
1911 (int)wait_for_completion_timeout(
1912 &cb_data.cb_completion, 2*HZ),
1913 >, 0);
1914 INIT_COMPLETION(cb_data.cb_completion);
1915 }
1916 if (failed)
1917 break;
1918
1919 get_rx_buffer_mock_fail = 0;
1920 while (cb_data.event_read_done < SMUX_RX_RETRY_MAX_PKTS) {
1921 UT_ASSERT_INT(
1922 (int)wait_for_completion_timeout(
1923 &cb_data.cb_completion, 2*HZ),
1924 >, 0);
1925 INIT_COMPLETION(cb_data.cb_completion);
1926 }
1927 if (failed)
1928 break;
1929
1930 UT_ASSERT_INT(1, ==, cb_data.event_read_failed);
1931 UT_ASSERT_INT(SMUX_RX_RETRY_MAX_PKTS, ==,
1932 cb_data.event_read_done);
1933 mock_cb_data_reset(&cb_data);
1934
1935 /* close port */
1936 ret = msm_smux_close(SMUX_TEST_LCID);
1937 UT_ASSERT_INT(ret, ==, 0);
1938 while (cb_data.cb_count < 3) {
1939 UT_ASSERT_INT(
1940 (int)wait_for_completion_timeout(
1941 &cb_data.cb_completion, HZ),
1942 >, 0);
1943 INIT_COMPLETION(cb_data.cb_completion);
1944 }
1945 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
1946 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
1947 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
1948 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
1949 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
1950 break;
1951 }
1952
1953 if (!failed) {
1954 i += scnprintf(buf + i, max - i, "\tOK\n");
1955 } else {
1956 pr_err("%s: Failed\n", __func__);
1957 i += scnprintf(buf + i, max - i, "\tFailed\n");
1958 i += mock_cb_data_print(&cb_data, buf + i, max - i);
1959 msm_smux_close(SMUX_TEST_LCID);
1960 }
1961 smux_byte_loopback = 0;
1962 mock_cb_data_reset(&cb_data);
1963 return i;
1964}
1965
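/*
 * Editorial sketch (not used by the tests above): the twelve back-off range
 * checks in smux_ut_local_get_rx_buff_retry() could also be written in table
 * form.  The helper name and the *err output parameter are hypothetical, the
 * tolerance rule mirrors the one used above (lower bound relaxed by 20 ms
 * once the expected delay reaches 32 ms), and UT_ASSERT_INT_IN_RANGE is
 * assumed to use the local buf/i/max/failed variables that every test above
 * declares.
 */
static const unsigned smux_ut_retry_delay_ms[] = {
	0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
};

static int smux_ut_check_retry_delays(struct smux_mock_callback *cb_ptr,
		unsigned long start_j, char *buf, int max, int *err)
{
	struct mock_get_rx_buff_event *event;
	int i = 0;
	int failed = 0;
	int n = 0;

	list_for_each_entry(event, &cb_ptr->get_rx_buff_retry_events, list) {
		unsigned expected = smux_ut_retry_delay_ms[n];
		unsigned elapsed = jiffies_to_msecs(event->jiffies - start_j);

		UT_ASSERT_INT_IN_RANGE(elapsed,
				expected >= 32 ? expected - 20 : expected,
				expected + 20);
		start_j = event->jiffies;
		if (++n >= (int)ARRAY_SIZE(smux_ut_retry_delay_ms))
			break;
	}

	*err = failed;
	return i;
}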
1966/**
1967 * Verify get_rx_buffer callback retry for auto-rx flow control.
1968 *
1969 * @buf Buffer for status message
1970 * @max Size of buffer
1971 *
1972 * @returns Number of bytes written to @buf
1973 */
1974static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
1975{
1976 static struct smux_mock_callback cb_data;
1977 static int cb_initialized;
1978 int i = 0;
1979 int failed = 0;
1980 int ret;
1981 int try;
1982 int try_rx_retry_wm;
1983
1984 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
1985 pr_err("%s", buf);
1986
1987 if (!cb_initialized)
1988 mock_cb_data_init(&cb_data);
1989
1990 mock_cb_data_reset(&cb_data);
1991 smux_byte_loopback = SMUX_TEST_LCID;
1992 while (!failed) {
1993 /* open port for loopback */
1994 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
1995 SMUX_CH_OPTION_LOCAL_LOOPBACK
1996 | SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
1997 0);
1998 UT_ASSERT_INT(ret, ==, 0);
1999
2000 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
2001 smux_mock_cb, get_rx_buffer_mock);
2002 UT_ASSERT_INT(ret, ==, 0);
2003 UT_ASSERT_INT(
2004 (int)wait_for_completion_timeout(
2005 &cb_data.cb_completion, HZ), >, 0);
2006 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
2007 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
2008 mock_cb_data_reset(&cb_data);
2009
2010 /* Test high rx-retry watermark */
2011 get_rx_buffer_mock_fail = 1;
2012 try_rx_retry_wm = 0;
2013 for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
2014 pr_err("%s: try %d\n", __func__, try);
2015 ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
2016 test_array, sizeof(test_array));
2017 UT_ASSERT_INT(ret, ==, 0);
2018 if (failed)
2019 break;
2020
2021 if (!try_rx_retry_wm &&
2022 cb_data.event_rx_retry_high_wm) {
2023 /* RX high watermark hit */
2024 try_rx_retry_wm = try + 1;
2025 break;
2026 }
2027
2028 while (cb_data.event_write_done <= try) {
2029 UT_ASSERT_INT(
2030 (int)wait_for_completion_timeout(
2031 &cb_data.cb_completion, HZ),
2032 >, 0);
2033 INIT_COMPLETION(cb_data.cb_completion);
2034 }
2035 if (failed)
2036 break;
2037 }
2038 if (failed)
2039 break;
2040
2041 /* RX retry high watermark should have been set */
2042 UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
2043 UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
2044
2045 /*
2046 * Disable RX buffer allocation failures and wait for
2047 * SMUX_RX_WM_HIGH successful packets to be delivered.
2048 */
2049 get_rx_buffer_mock_fail = 0;
2050 while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
2051 UT_ASSERT_INT(
2052 (int)wait_for_completion_timeout(
2053 &cb_data.cb_completion, 2*HZ),
2054 >, 0);
2055 INIT_COMPLETION(cb_data.cb_completion);
2056 }
2057 if (failed)
2058 break;
2059
2060 UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
2061 UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
2062 cb_data.event_read_done);
2063 UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
2064 mock_cb_data_reset(&cb_data);
2065
2066 /* close port */
2067 ret = msm_smux_close(SMUX_TEST_LCID);
2068 UT_ASSERT_INT(ret, ==, 0);
2069 while (cb_data.cb_count < 3) {
2070 UT_ASSERT_INT(
2071 (int)wait_for_completion_timeout(
2072 &cb_data.cb_completion, HZ),
2073 >, 0);
2074 INIT_COMPLETION(cb_data.cb_completion);
2075 }
2076 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
2077 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
2078 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
2079 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
2080 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
2081 break;
2082 }
2083
2084 if (!failed) {
2085 i += scnprintf(buf + i, max - i, "\tOK\n");
2086 } else {
2087 pr_err("%s: Failed\n", __func__);
2088 i += scnprintf(buf + i, max - i, "\tFailed\n");
2089 i += mock_cb_data_print(&cb_data, buf + i, max - i);
2090 msm_smux_close(SMUX_TEST_LCID);
2091 }
2092 smux_byte_loopback = 0;
2093 mock_cb_data_reset(&cb_data);
2094 return i;
2095}
2096
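/*
 * Editorial note on the auto flow-control behavior verified above: with
 * SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP set, queuing SMUX_RX_WM_HIGH packets
 * for RX-buffer retry raises the rx-retry high watermark event (and, per
 * the option name, asks the remote side to stop transmitting); once buffer
 * allocations succeed again, every queued packet is delivered without a
 * read failure and the rx-retry low watermark event fires.
 */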
2097/**
2098 * Verify remote flow control (remote TX stop).
2099 *
2100 * @buf Buffer for status message
2101 * @max Size of buffer
2102 *
2103 * @returns Number of bytes written to @buf
2104 */
2105static int smux_ut_remote_tx_stop(char *buf, int max)
2106{
2107 static struct smux_mock_callback cb_data;
2108 static int cb_initialized;
2109 int i = 0;
2110 int failed = 0;
2111 int ret;
2112
2113 i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
2114 pr_err("%s", buf);
2115
2116 if (!cb_initialized)
2117 mock_cb_data_init(&cb_data);
2118
2119 mock_cb_data_reset(&cb_data);
2120 while (!failed) {
2121 /* open port for remote loopback */
2122 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
2123 SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
2124 UT_ASSERT_INT(ret, ==, 0);
2125
2126 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
2127 get_rx_buffer);
2128 UT_ASSERT_INT(ret, ==, 0);
2129 UT_ASSERT_INT(
2130 (int)wait_for_completion_timeout(
2131 &cb_data.cb_completion, HZ), >, 0);
2132 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
2133 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
2134 mock_cb_data_reset(&cb_data);
2135
2136 /* send 1 packet and verify response */
2137 ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
2138 test_array, sizeof(test_array));
2139 UT_ASSERT_INT(ret, ==, 0);
2140 UT_ASSERT_INT(
2141 (int)wait_for_completion_timeout(
2142 &cb_data.cb_completion, HZ),
2143 >, 0);
2144 UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
2145
2146 INIT_COMPLETION(cb_data.cb_completion);
2147 if (!cb_data.event_read_done) {
2148 UT_ASSERT_INT(
2149 (int)wait_for_completion_timeout(
2150 &cb_data.cb_completion, HZ),
2151 >, 0);
2152 }
2153 UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
2154 mock_cb_data_reset(&cb_data);
2155
2156 /* enable flow control */
2157 UT_ASSERT_INT(smux_lch[SMUX_TEST_LCID].tx_flow_control, ==, 0);
2158 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
2159 SMUX_CH_OPTION_REMOTE_TX_STOP, 0);
2160 UT_ASSERT_INT(ret, ==, 0);
2161
2162 /* wait for the echoed flow-control command to set tx_flow_control, then clear it locally */
2163 msleep(500);
2164 UT_ASSERT_INT(smux_lch[SMUX_TEST_LCID].tx_flow_control, ==, 1);
2165 smux_lch[SMUX_TEST_LCID].tx_flow_control = 0;
2166
2167 /* Send 1 packet and verify no response */
2168 ret = msm_smux_write(SMUX_TEST_LCID, (void *)2,
2169 test_array, sizeof(test_array));
2170 UT_ASSERT_INT(ret, ==, 0);
2171 UT_ASSERT_INT(
2172 (int)wait_for_completion_timeout(
2173 &cb_data.cb_completion, HZ),
2174 >, 0);
2175 INIT_COMPLETION(cb_data.cb_completion);
2176 UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
2177 UT_ASSERT_INT(cb_data.event_read_done, ==, 0);
2178 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
2179
2180 UT_ASSERT_INT(
2181 (int)wait_for_completion_timeout(
2182 &cb_data.cb_completion, 1*HZ),
2183 ==, 0);
2184 UT_ASSERT_INT(cb_data.event_read_done, ==, 0);
2185 mock_cb_data_reset(&cb_data);
2186
2187 /* disable flow control and verify response is received */
2188 UT_ASSERT_INT(cb_data.event_read_done, ==, 0);
2189 ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
2190 0, SMUX_CH_OPTION_REMOTE_TX_STOP);
2191 UT_ASSERT_INT(ret, ==, 0);
2192
2193 UT_ASSERT_INT(
2194 (int)wait_for_completion_timeout(
2195 &cb_data.cb_completion, HZ),
2196 >, 0);
2197 UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
2198 mock_cb_data_reset(&cb_data);
2199
2200 /* close port */
2201 ret = msm_smux_close(SMUX_TEST_LCID);
2202 UT_ASSERT_INT(ret, ==, 0);
2203 while (cb_data.cb_count < 3) {
2204 UT_ASSERT_INT(
2205 (int)wait_for_completion_timeout(
2206 &cb_data.cb_completion, HZ),
2207 >, 0);
2208 INIT_COMPLETION(cb_data.cb_completion);
2209 }
2210 UT_ASSERT_INT(cb_data.cb_count, ==, 3);
2211 UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
2212 UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
2213 UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
2214 UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
2215 break;
2216 }
2217
2218 if (!failed) {
2219 i += scnprintf(buf + i, max - i, "\tOK\n");
2220 } else {
2221 pr_err("%s: Failed\n", __func__);
2222 i += scnprintf(buf + i, max - i, "\tFailed\n");
2223 i += mock_cb_data_print(&cb_data, buf + i, max - i);
2224 msm_smux_set_ch_option(SMUX_TEST_LCID,
2225 0, SMUX_CH_OPTION_REMOTE_TX_STOP);
2226 msm_smux_close(SMUX_TEST_LCID);
2227 }
2228 mock_cb_data_reset(&cb_data);
2229 return i;
2230}
2231
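/*
 * Editorial sketch: msm_smux_set_ch_option() takes a set mask followed by a
 * clear mask, which is how the test above toggles remote flow control.  The
 * helper names are hypothetical; both underlying calls appear verbatim in
 * the test.
 */
static int example_pause_remote_tx(void)
{
	/* second argument sets option bits, third argument clears them */
	return msm_smux_set_ch_option(SMUX_TEST_LCID,
			SMUX_CH_OPTION_REMOTE_TX_STOP, 0);
}

static int example_resume_remote_tx(void)
{
	return msm_smux_set_ch_option(SMUX_TEST_LCID,
			0, SMUX_CH_OPTION_REMOTE_TX_STOP);
}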
2232/**
2233 * Verify remote-initiated wakeup of the link.
2234 *
2235 * @buf Output buffer for failure/status messages
2236 * @max Size of @buf
 *
 * @returns Number of bytes written to @buf
2237 */
2238static int smux_ut_remote_initiated_wakeup(char *buf, int max)
2239{
2240 int i = 0;
2241 int failed = 0;
2242 static struct smux_mock_callback cb_data;
2243 static int cb_initialized;
2244 int ret;
2245
2246 if (!cb_initialized)
2247 mock_cb_data_init(&cb_data);
2248
2249 smux_set_loopback_data_reply_delay(SMUX_REMOTE_DELAY_TIME_MS);
2250 mock_cb_data_reset(&cb_data);
2251 do {
2252 unsigned long start_j;
2253 unsigned transfer_time;
2254 unsigned lwakeups_start;
2255 unsigned rwakeups_start;
2256 unsigned lwakeups_end;
2257 unsigned rwakeups_end;
2258 unsigned lwakeup_delta;
2259 unsigned rwakeup_delta;
2260
2261 /* open port */
2262 ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
2263 get_rx_buffer);
2264 UT_ASSERT_INT(ret, ==, 0);
2265 UT_ASSERT_INT(
2266 (int)wait_for_completion_timeout(
2267 &cb_data.cb_completion, HZ), >, 0);
2268 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
2269 UT_ASSERT_INT(cb_data.event_connected, ==, 1);
2270 mock_cb_data_reset(&cb_data);
2271
2272 /* do local wakeup test and send echo packet */
2273 msleep(SMUX_REMOTE_INACTIVITY_TIME_MS);
2274 smux_get_wakeup_counts(&lwakeups_start, &rwakeups_start);
2275 ret = msm_smux_write(SMUX_TEST_LCID, (void *)0x12345678,
2276 "Hello", 5);
2277 UT_ASSERT_INT(ret, ==, 0);
2278 UT_ASSERT_INT(
2279 (int)wait_for_completion_timeout(
2280 &cb_data.cb_completion, HZ), >, 0);
2281 UT_ASSERT_INT(cb_data.cb_count, ==, 1);
2282 UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
2283 mock_cb_data_reset(&cb_data);
2284
2285 /* verify local initiated wakeup */
2286 smux_get_wakeup_counts(&lwakeups_end, &rwakeups_end);
2287 if (lwakeups_end > lwakeups_start)
2288 i += scnprintf(buf + i, max - i,
2289 "\tGood - have Apps-initiated wakeup\n");
2290 else
2291 i += scnprintf(buf + i, max - i,
2292 "\tBad - no Apps-initiated wakeup\n");
2293
2294 /* verify remote wakeup and echo response */
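		/*
		 * The loopback delays the echoed data by
		 * SMUX_REMOTE_DELAY_TIME_MS while this side stays idle for
		 * longer than SMUX_REMOTE_INACTIVITY_TIME_MS, so the link
		 * goes to sleep and the echo can only arrive through a
		 * remote-initiated wakeup; the observed transfer time is
		 * therefore checked against the reply delay with the
		 * inactivity time as the tolerance.
		 */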
2295 smux_get_wakeup_counts(&lwakeups_start, &rwakeups_start);
2296 start_j = jiffies;
2297 INIT_COMPLETION(cb_data.cb_completion);
2298 if (!cb_data.event_read_done)
2299 UT_ASSERT_INT(
2300 (int)wait_for_completion_timeout(
2301 &cb_data.cb_completion,
2302 msecs_to_jiffies(SMUX_REMOTE_DELAY_TIME_MS * 2)),
2303 >, 0);
2304 transfer_time = (unsigned)jiffies_to_msecs(jiffies - start_j);
2305 UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
2306 UT_ASSERT_INT_IN_RANGE(transfer_time,
2307 SMUX_REMOTE_DELAY_TIME_MS -
2308 SMUX_REMOTE_INACTIVITY_TIME_MS,
2309 SMUX_REMOTE_DELAY_TIME_MS +
2310 SMUX_REMOTE_INACTIVITY_TIME_MS);
2311 smux_get_wakeup_counts(&lwakeups_end, &rwakeups_end);
2312
2313 lwakeup_delta = lwakeups_end - lwakeups_start;
2314 rwakeup_delta = rwakeups_end - rwakeups_start;
2315 if (rwakeup_delta && lwakeup_delta) {
2316 i += scnprintf(buf + i, max - i,
2317 "\tBoth local and remote wakeup - re-run test (transfer time %d ms)\n",
2318 transfer_time);
2319 failed = 1;
2320 break;
2321 } else if (lwakeup_delta) {
2322 i += scnprintf(buf + i, max - i,
2323 "\tLocal wakeup only (transfer time %d ms) - FAIL\n",
2324 transfer_time);
2325 failed = 1;
2326 break;
2327 } else {
2328 i += scnprintf(buf + i, max - i,
2329 "\tRemote wakeup verified (transfer time %d ms) - OK\n",
2330 transfer_time);
2331 }
2332 } while (0);
2333
2334 if (!failed) {
2335 i += scnprintf(buf + i, max - i, "\tOK\n");
2336 } else {
2337 pr_err("%s: Failed\n", __func__);
2338 i += scnprintf(buf + i, max - i, "\tFailed\n");
2339 i += mock_cb_data_print(&cb_data, buf + i, max - i);
2340 }
2341
2342 mock_cb_data_reset(&cb_data);
2343 msm_smux_close(SMUX_TEST_LCID);
2344 wait_for_completion_timeout(&cb_data.cb_completion, HZ);
2345
2346 mock_cb_data_reset(&cb_data);
2347 smux_set_loopback_data_reply_delay(0);
2348
2349 return i;
2350}
2351
2352static char debug_buffer[DEBUG_BUFMAX];
2353
2354static ssize_t debug_read(struct file *file, char __user *buf,
2355 size_t count, loff_t *ppos)
2356{
2357 int (*fill)(char *buf, int max) = file->private_data;
2358 int bsize;
2359
2360 if (*ppos != 0)
2361 return 0;
2362
2363 bsize = fill(debug_buffer, DEBUG_BUFMAX);
2364 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
2365}
2366
2367static int debug_open(struct inode *inode, struct file *file)
2368{
2369 file->private_data = inode->i_private;
2370 return 0;
2371}
2372
2373static const struct file_operations debug_ops = {
2374 .read = debug_read,
2375 .open = debug_open,
2376};
2377
2378static void debug_create(const char *name, mode_t mode,
2379 struct dentry *dent,
2380 int (*fill)(char *buf, int max))
2381{
2382 debugfs_create_file(name, mode, dent, fill, &debug_ops);
2383}
2384
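/*
 * Editorial sketch: each unit test above is exposed through debugfs as a
 * "fill" function of this shape -- it formats its status into buf and
 * returns the number of bytes written, which debug_read() above hands back
 * to userspace.  The test name is hypothetical; a real test would also be
 * registered with debug_create() in smux_debugfs_init() below.
 */
static int smux_ut_example_fill(char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	i += scnprintf(buf + i, max - i, "\tOK\n");
	return i;
}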
2385static int __init smux_debugfs_init(void)
2386{
2387 struct dentry *dent;
2388
2389 dent = debugfs_create_dir("n_smux_test", 0);
2390 if (IS_ERR(dent))
2391 return PTR_ERR(dent);
2392
2393 /*
2394 * Add Unit Test entries.
2395 *
2396 * The idea with unit tests is that you can run all of them
2397 * from an ADB shell by doing:
2398 *   adb shell
 *   cd /sys/kernel/debug/n_smux_test   (or wherever debugfs is mounted)
2399 *   cat ut*
2400 *
2401 * If particular tests fail, you can then repeatedly re-run just the
2402 * failing tests as you debug and resolve the failures.
2403 */
2404 debug_create("ut_local_basic", 0444, dent, smux_ut_basic);
2405 debug_create("ut_remote_basic", 0444, dent, smux_ut_remote_basic);
2406 debug_create("ut_local_big_pkt", 0444, dent, smux_ut_local_big_pkt);
2407 debug_create("ut_remote_big_pkt", 0444, dent, smux_ut_remote_big_pkt);
2408 debug_create("ut_local_tiocm", 0444, dent, smux_ut_local_tiocm);
2409 debug_create("ut_remote_tiocm", 0444, dent, smux_ut_remote_tiocm);
2410 debug_create("ut_local_wm", 0444, dent, smux_ut_local_wm);
2411 debug_create("ut_local_smuxld_receive_buf", 0444, dent,
2412 smux_ut_local_smuxld_receive_buf);
2413 debug_create("ut_local_get_rx_buff_retry", 0444, dent,
2414 smux_ut_local_get_rx_buff_retry);
2415 debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
2416 smux_ut_local_get_rx_buff_retry_auto);
2417 debug_create("ut_ssr_remote_basic", 0444, dent,
2418 smux_ut_ssr_remote_basic);
2419 debug_create("ut_ssr_remote_open", 0444, dent,
2420 smux_ut_ssr_remote_open);
2421 debug_create("ut_ssr_remote_rx_buff_retry", 0444, dent,
2422 smux_ut_ssr_remote_rx_buff_retry);
2423 debug_create("ut_remote_tx_stop", 0444, dent,
2424 smux_ut_remote_tx_stop);
2425 debug_create("ut_remote_throughput", 0444, dent,
2426 smux_ut_remote_throughput);
2427 debug_create("ut_remote_initiated_wakeup", 0444, dent,
2428 smux_ut_remote_initiated_wakeup);
2429 return 0;
2430}
2431
2432late_initcall(smux_debugfs_init);
2433