mtd: fix a huge latency problem in the MTD CFI and LPDDR flash drivers.
Using memcpy() while holding a spinlock causes very long thread
context-switch delays if the flash chip bandwidth is low and the data
to be copied is large, because a held spinlock disables preemption.
For example: a flash chip with 6.5 MB/s bandwidth, used under UBIFS,
which sometimes requests 128 KiB (the flash erase size) in one go,
causes a preemption delay of about 20 milliseconds
(128 KiB / 6.5 MB/s ~= 20 ms). High-priority threads are not served
during this time, regardless of whether they access the flash or not.
This behavior breaks real time.
The patch converts all uses of spin_lock operations on xxxx->mutex
into mutex operations, which is exactly what the name says and means.
I have checked the code of the drivers and there is no use of atomic
paths such as interrupts or timers. The mtdoops facility is also not
used by these drivers, so it is safe to replace the spinlock with a
mutex. There is no performance regression, since the mutex is normally
uncontended.
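The pattern is illustrated by the minimal sketch below (illustration
only, not part of the patch; 'my_chip' and 'my_flash_read' are made-up
names). Holding a spinlock across the copy keeps preemption disabled
for the whole transfer, while a mutex lets the copying thread sleep
and be preempted:

	#include <linux/mutex.h>
	#include <linux/io.h>

	struct my_chip {
		struct mutex	mutex;	/* was: spinlock_t; mutex_init() at probe time */
		void __iomem	*base;
	};

	static void my_flash_read(struct my_chip *chip, void *buf,
				  unsigned long ofs, size_t len)
	{
		/*
		 * Before: spin_lock() disabled preemption for the whole
		 * copy (~20 ms for 128 KiB at 6.5 MB/s).  With a mutex
		 * the holder may be preempted, so high-priority threads
		 * keep running.
		 */
		mutex_lock(&chip->mutex);
		memcpy_fromio(buf, chip->base + ofs, len);
		mutex_unlock(&chip->mutex);
	}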
Changelog:
06.03.2010 First release
26.03.2010 Fixed the mutex[1] issue and tested for compile failures
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9253043..62f3ea9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -725,8 +725,7 @@
/* those should be reset too since
they create memory references. */
init_waitqueue_head(&chip->wq);
- spin_lock_init(&chip->_spinlock);
- chip->mutex = &chip->_spinlock;
+ mutex_init(&chip->mutex);
chip++;
}
}
@@ -772,9 +771,9 @@
if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
break;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Someone else might have been playing with it. */
return -EAGAIN;
}
@@ -821,9 +820,9 @@
return -EIO;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
cfi_udelay(1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
@@ -850,10 +849,10 @@
sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
return -EAGAIN;
}
}
@@ -899,20 +898,20 @@
* it'll happily send us to sleep. In any case, when
* get_chip returns success we're clear to go ahead.
*/
- ret = spin_trylock(contender->mutex);
+ ret = mutex_trylock(&contender->mutex);
spin_unlock(&shared->lock);
if (!ret)
goto retry;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
ret = chip_ready(map, contender, contender->start, mode);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (ret == -EAGAIN) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
if (ret) {
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
return ret;
}
spin_lock(&shared->lock);
@@ -921,10 +920,10 @@
* in FL_SYNCING state. Put contender and retry. */
if (chip->state == FL_SYNCING) {
put_chip(map, contender, contender->start);
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
goto retry;
}
- spin_unlock(contender->mutex);
+ mutex_unlock(&contender->mutex);
}
/* Check if we already have suspended erase
@@ -934,10 +933,10 @@
spin_unlock(&shared->lock);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
goto retry;
}
@@ -967,12 +966,12 @@
if (shared->writing && shared->writing != chip) {
/* give back ownership to who we loaned it from */
struct flchip *loaner = shared->writing;
- spin_lock(loaner->mutex);
+ mutex_lock(&loaner->mutex);
spin_unlock(&shared->lock);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
put_chip(map, loaner, loaner->start);
- spin_lock(chip->mutex);
- spin_unlock(loaner->mutex);
+ mutex_lock(&chip->mutex);
+ mutex_unlock(&loaner->mutex);
wake_up(&chip->wq);
return;
}
@@ -1142,7 +1141,7 @@
(void) map_read(map, adr);
xip_iprefetch();
local_irq_enable();
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
xip_iprefetch();
cond_resched();
@@ -1152,15 +1151,15 @@
* a suspended erase state. If so let's wait
* until it's done.
*/
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != newstate) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
/* Disallow XIP again */
local_irq_disable();
@@ -1216,10 +1215,10 @@
int chip_state = chip->state;
unsigned int timeo, sleep_time, reset_timeo;
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
timeo = chip_op_time_max;
if (!timeo)
@@ -1239,7 +1238,7 @@
}
/* OK Still waiting. Drop the lock, wait a while and retry. */
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
@@ -1254,17 +1253,17 @@
cond_resched();
timeo--;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
while (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
}
if (chip->erase_suspended && chip_state == FL_ERASING) {
/* Erase suspend occured while sleep: reset timeout */
@@ -1300,7 +1299,7 @@
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_POINT);
@@ -1311,7 +1310,7 @@
chip->state = FL_POINT;
chip->ref_point_counter++;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1396,7 +1395,7 @@
else
thislen = len;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_POINT) {
chip->ref_point_counter--;
if(chip->ref_point_counter == 0)
@@ -1405,7 +1404,7 @@
printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
len -= thislen;
ofs = 0;
@@ -1424,10 +1423,10 @@
/* Ensure cmd read/writes are aligned. */
cmd_addr = adr & ~(map_bankwidth(map)-1);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_addr, FL_READY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1441,7 +1440,7 @@
put_chip(map, chip, cmd_addr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -1504,10 +1503,10 @@
return -EINVAL;
}
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, mode);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1553,7 +1552,7 @@
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1662,10 +1661,10 @@
/* Let's determine this according to the interleave only once */
write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, cmd_adr, FL_WRITING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1796,7 +1795,7 @@
xip_enable(map, chip, cmd_adr);
out: put_chip(map, chip, cmd_adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1875,10 +1874,10 @@
adr += chip->start;
retry:
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1934,7 +1933,7 @@
} else if (chipstatus & 0x20 && retries--) {
printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
goto retry;
} else {
printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
@@ -1946,7 +1945,7 @@
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -1979,7 +1978,7 @@
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SYNCING);
if (!ret) {
@@ -1990,7 +1989,7 @@
* with the chip now anyway.
*/
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -1998,14 +1997,14 @@
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2051,10 +2050,10 @@
adr += chip->start;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2088,7 +2087,7 @@
xip_enable(map, chip, adr);
out: put_chip(map, chip, adr);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2153,10 +2152,10 @@
struct cfi_private *cfi = map->fldrv_priv;
int ret;
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
if (ret) {
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return ret;
}
@@ -2175,7 +2174,7 @@
INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
put_chip(map, chip, chip->start);
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
return 0;
}
@@ -2450,7 +2449,7 @@
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
switch (chip->state) {
case FL_READY:
@@ -2482,7 +2481,7 @@
case FL_PM_SUSPENDED:
break;
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
@@ -2491,7 +2490,7 @@
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
@@ -2501,7 +2500,7 @@
chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
}
@@ -2542,7 +2541,7 @@
chip = &cfi->chips[i];
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
@@ -2551,7 +2550,7 @@
wake_up(&chip->wq);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
if ((mtd->flags & MTD_POWERUP_LOCK)
@@ -2571,14 +2570,14 @@
/* force the completion of any ongoing operation
and switch to array mode so any bootloader in
flash is accessible for soft reboot. */
- spin_lock(chip->mutex);
+ mutex_lock(&chip->mutex);
ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
put_chip(map, chip, chip->start);
}
- spin_unlock(chip->mutex);
+ mutex_unlock(&chip->mutex);
}
return 0;