ehea: use wait queues instead of msleep in ehea_flush_sq
This patch removes the msleep polling loop in ehea_flush_sq() and replaces
it with a wait queue, making the code cleaner.
Signed-off-by: Breno Leitao <leitao@linux.vnet.ibm.com>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
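
A minimal, self-contained sketch of the wait-queue pattern the patch adopts
(illustrative only, not part of the patch: the demo_* names and helper layout
are hypothetical; only the init_waitqueue_head()/wake_up()/wait_event_timeout()
usage mirrors the change below):

#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_ctx {
	wait_queue_head_t wq;	/* flush path sleeps here */
	atomic_t avail;		/* e.g. reclaimed send WQEs */
	int needed;		/* threshold the flush waits for */
};

static void demo_init(struct demo_ctx *ctx, int needed)
{
	init_waitqueue_head(&ctx->wq);
	atomic_set(&ctx->avail, 0);
	ctx->needed = needed;
}

/* Completion path: account a reclaimed resource and wake any waiter. */
static void demo_complete(struct demo_ctx *ctx)
{
	atomic_inc(&ctx->avail);
	wake_up(&ctx->wq);
}

/*
 * Flush path: sleep until the threshold is reached or 100ms elapse,
 * instead of polling with msleep().  wait_event_timeout() returns 0
 * only if the condition is still false when the timeout expires.
 */
static int demo_wait_for_drain(struct demo_ctx *ctx)
{
	long ret;

	ret = wait_event_timeout(ctx->wq,
				 atomic_read(&ctx->avail) >= ctx->needed,
				 msecs_to_jiffies(100));
	return ret ? 0 : -ETIMEDOUT;
}
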
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 190fb69..7897bdf 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -888,6 +888,7 @@
pr->queue_stopped = 0;
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
+ wake_up(&pr->port->swqe_avail_wq);
return cqe;
}
@@ -2652,6 +2653,8 @@
netif_start_queue(dev);
}
+ init_waitqueue_head(&port->swqe_avail_wq);
+
mutex_unlock(&port->port_lock);
return ret;
@@ -2724,13 +2727,15 @@
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
- int k = 0;
- while (atomic_read(&pr->swqe_avail) < swqe_max) {
- msleep(5);
- if (++k == 20) {
- ehea_error("WARNING: sq not flushed completely");
- break;
- }
+ int ret;
+
+ ret = wait_event_timeout(port->swqe_avail_wq,
+ atomic_read(&pr->swqe_avail) >= swqe_max,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ ehea_error("WARNING: sq not flushed completely");
+ break;
}
}
}
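
The 100ms timeout preserves the previous upper bound (20 iterations of
msleep(5)), and the existing "sq not flushed completely" warning still fires
if the send queue has not drained within that time.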