spi: core: Fix deadlock when sending messages

The function __spi_pump_messages() is called by spi_pump_messages() and
__spi_sync(). The function __spi_sync() has a 'bus_locked' argument that
indicates whether it is called with the SPI bus mutex held; if
'bus_locked' is false then __spi_sync() will acquire the mutex itself.

Commit 556351f14e74 ("spi: introduce accelerated read support for spi
flash devices") made a change to acquire the SPI bus mutex within
__spi_pump_messages(). However, this change did not check whether the
mutex was already held. If __spi_sync() is called with the mutex held
(i.e. 'bus_locked' is true), then a deadlock occurs when
__spi_pump_messages() is called.
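
As a rough illustration (a userspace analogue using pthreads with
invented names, not the kernel code), the failing pattern is a default,
non-recursive mutex being locked a second time by the thread that
already holds it; the second lock never returns:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t bus_lock_mutex = PTHREAD_MUTEX_INITIALIZER;

  /* stand-in for __spi_pump_messages() after commit 556351f14e74 */
  static void pump_messages(void)
  {
      pthread_mutex_lock(&bus_lock_mutex);   /* second lock: blocks forever */
      /* ... process the queued message ... */
      pthread_mutex_unlock(&bus_lock_mutex);
  }

  /* stand-in for __spi_sync() entered with 'bus_locked' true */
  static void sync_with_bus_locked(void)
  {
      pthread_mutex_lock(&bus_lock_mutex);   /* caller takes the bus lock */
      pump_messages();                       /* deadlocks here */
      pthread_mutex_unlock(&bus_lock_mutex);
  }

  int main(void)
  {
      sync_with_bus_locked();
      puts("never reached");
      return 0;
  }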

Fix this deadlock by passing the 'bus_locked' state from __spi_sync() to
__spi_pump_messages() and only acquiring the mutex if it is not already
held. In the case where __spi_pump_messages() is called from
spi_pump_messages(), the mutex is assumed not to be held, so
__spi_pump_messages() is called with 'bus_locked' set to false. Finally,
move the unlocking of the mutex to the end of __spi_pump_messages() to
simplify the code, and only call cond_resched() if there are no errors.
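
The heart of the change is the conditional locking visible in the diff
below; a minimal sketch of the same pattern (again a userspace analogue
with invented names, not the kernel code):

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  static pthread_mutex_t bus_lock_mutex = PTHREAD_MUTEX_INITIALIZER;

  /* take the lock only when the caller does not already hold it */
  static void pump_messages(bool bus_locked)
  {
      if (!bus_locked)
          pthread_mutex_lock(&bus_lock_mutex);

      /* ... process the queued message ... */

      if (!bus_locked)
          pthread_mutex_unlock(&bus_lock_mutex);
  }

  int main(void)
  {
      /* message-pump path: the caller does not hold the bus lock */
      pump_messages(false);

      /* __spi_sync()-style path: the caller already holds the bus lock */
      pthread_mutex_lock(&bus_lock_mutex);
      pump_messages(true);                 /* no second lock, no deadlock */
      pthread_mutex_unlock(&bus_lock_mutex);

      puts("both paths completed");
      return 0;
  }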

Fixes: 556351f14e74 ("spi: introduce accelerated read support for spi flash devices")
Signed-off-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Mark Brown <broonie@kernel.org>

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0b2bbf1..f565cc8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1047,6 +1047,7 @@
* __spi_pump_messages - function which processes spi message queue
* @master: master to process queue for
* @in_kthread: true if we are in the context of the message pump thread
+ * @bus_locked: true if the bus mutex is held when calling this function
*
* This function checks if there is any spi message in the queue that
* needs processing and if so call out to the driver to initialize hardware
@@ -1056,7 +1057,8 @@
* inside spi_sync(); the queue extraction handling at the top of the
* function should deal with this safely.
*/
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
+ bool bus_locked)
{
unsigned long flags;
bool was_busy = false;
@@ -1152,7 +1154,9 @@
}
}
- mutex_lock(&master->bus_lock_mutex);
+ if (!bus_locked)
+ mutex_lock(&master->bus_lock_mutex);
+
trace_spi_message_start(master->cur_msg);
if (master->prepare_message) {
@@ -1162,8 +1166,7 @@
"failed to prepare message: %d\n", ret);
master->cur_msg->status = ret;
spi_finalize_current_message(master);
- mutex_unlock(&master->bus_lock_mutex);
- return;
+ goto out;
}
master->cur_msg_prepared = true;
}
@@ -1172,21 +1175,23 @@
if (ret) {
master->cur_msg->status = ret;
spi_finalize_current_message(master);
- mutex_unlock(&master->bus_lock_mutex);
- return;
+ goto out;
}
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
"failed to transfer one message from queue\n");
- mutex_unlock(&master->bus_lock_mutex);
- return;
+ goto out;
}
- mutex_unlock(&master->bus_lock_mutex);
+
+out:
+ if (!bus_locked)
+ mutex_unlock(&master->bus_lock_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
- cond_resched();
+ if (!ret)
+ cond_resched();
}
/**
@@ -1198,7 +1203,7 @@
struct spi_master *master =
container_of(work, struct spi_master, pump_messages);
- __spi_pump_messages(master, true);
+ __spi_pump_messages(master, true, false);
}
static int spi_init_queue(struct spi_master *master)
@@ -2462,7 +2467,7 @@
spi_sync_immediate);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
spi_sync_immediate);
- __spi_pump_messages(master, false);
+ __spi_pump_messages(master, false, bus_locked);
}
wait_for_completion(&done);