Merge "Accelerate nand device in virtualization environment"
diff --git a/hw/goldfish_nand.c b/hw/goldfish_nand.c
index eb0c355..49d575a 100644
--- a/hw/goldfish_nand.c
+++ b/hw/goldfish_nand.c
@@ -130,6 +130,8 @@
     uint32_t addr_low;
     uint32_t addr_high;
     uint32_t transfer_size;
     uint32_t data;
+    uint32_t batch_addr_low;    /* guest-physical address of the guest's */
+    uint32_t batch_addr_high;   /* batch_data block, split in two halves */
     uint32_t result;
 } nand_dev_controller_state;
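A note on the two new fields: the guest programs them through a pair of 32-bit registers (NAND_BATCH_ADDR_LOW/HIGH, defined in goldfish_nand_reg.h below), and the device recombines them exactly as it already does for addr_low/addr_high. A minimal sketch of that recombination:

```c
#include <stdint.h>

/* Sketch only: how the two 32-bit register halves recombine into the
 * 64-bit guest-physical address of the guest's batch_data block. */
static uint64_t batch_gpa(uint32_t batch_addr_low, uint32_t batch_addr_high)
{
    return ((uint64_t)batch_addr_high << 32) | batch_addr_low;
}
```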
@@ -457,6 +459,22 @@
     uint64_t addr;
     nand_dev *dev;
 
+    if (cmd == NAND_CMD_WRITE_BATCH || cmd == NAND_CMD_READ_BATCH ||
+        cmd == NAND_CMD_ERASE_BATCH) {
+        /* Batched command: fetch every transfer parameter in a single
+         * read of the guest-supplied batch_data block, then latch the
+         * values into the usual register state so the per-command
+         * handlers below run unchanged. */
+        struct batch_data bd;
+        uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;
+
+        cpu_physical_memory_read(bd_addr, (void*)&bd, sizeof(struct batch_data));
+        s->dev = bd.dev;
+        s->addr_low = bd.addr_low;
+        s->addr_high = bd.addr_high;
+        s->transfer_size = bd.transfer_size;
+        s->data = bd.data;
+    }
     addr = s->addr_low | ((uint64_t)s->addr_high << 32);
     size = s->transfer_size;
     if(s->dev >= nand_dev_count)
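To see the protocol from the other side, here is a hedged guest-driver sketch of issuing one batched read. `nand_base` (the device's mapped MMIO window) and `guest_phys()` are hypothetical stand-ins for whatever the guest kernel actually provides; the register and struct names come from goldfish_nand_reg.h below.

```c
#include <stdint.h>
#include "goldfish_nand_reg.h"

/* Hypothetical helper: guest-physical address of a virtual address. */
extern uint64_t guest_phys(const void *va);

/* Hypothetical guest-side sketch: one batched read replaces five
 * per-parameter MMIO writes plus the command write. A real driver
 * would also need the appropriate memory barriers / volatile access. */
static uint32_t nand_read_batch(volatile uint32_t *nand_base,
                                uint64_t flash_addr,
                                uint32_t buf_addr, uint32_t size)
{
    static struct batch_data bd;   /* must stay resident while the cmd runs */
    uint64_t gpa;

    bd.dev           = 0;                      /* NAND device index */
    bd.addr_low      = (uint32_t)flash_addr;
    bd.addr_high     = (uint32_t)(flash_addr >> 32);
    bd.transfer_size = size;
    bd.data          = buf_addr;               /* guest address of the buffer */

    gpa = guest_phys(&bd);
    nand_base[NAND_BATCH_ADDR_LOW / 4]  = (uint32_t)gpa;
    nand_base[NAND_BATCH_ADDR_HIGH / 4] = (uint32_t)(gpa >> 32);
    nand_base[NAND_COMMAND / 4]         = NAND_CMD_READ_BATCH;

    return bd.result;   /* written back by the device; see the write path */
}
```

Counting MMIO exits, the existing path costs six device writes per operation (dev, addr low/high, transfer size, data, command), while this path costs three, and only one once the batch block's address has been programmed.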
@@ -473,6 +487,7 @@
 #endif
         cpu_memory_rw_debug(cpu_single_env, s->data, (uint8_t*)dev->devname, size, 1);
         return size;
+    case NAND_CMD_READ_BATCH:   /* same handler; params were latched from batch_data */
     case NAND_CMD_READ:
         if(addr >= dev->max_size)
             return 0;
@@ -486,6 +501,7 @@
 #endif
         cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 1);
         return size;
+    case NAND_CMD_WRITE_BATCH:  /* same handler; params were latched from batch_data */
     case NAND_CMD_WRITE:
         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
             return 0;
@@ -501,6 +517,7 @@
 #endif
         cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 0);
         return size;
+    case NAND_CMD_ERASE_BATCH:  /* same handler; params were latched from batch_data */
     case NAND_CMD_ERASE:
         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
             return 0;
@@ -542,6 +559,12 @@
     case NAND_ADDR_LOW:
         s->addr_low = value;
         break;
+    case NAND_BATCH_ADDR_LOW:
+        s->batch_addr_low = value;
+        break;
+    case NAND_BATCH_ADDR_HIGH:
+        s->batch_addr_high = value;
+        break;
     case NAND_TRANSFER_SIZE:
         s->transfer_size = value;
         break;
@@ -550,6 +573,18 @@
         break;
     case NAND_COMMAND:
         s->result = nand_dev_do_cmd(s, value);
+        if (value == NAND_CMD_WRITE_BATCH || value == NAND_CMD_READ_BATCH ||
+            value == NAND_CMD_ERASE_BATCH) {
+            struct batch_data bd;
+            uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;
+
+            /* Read-modify-write so that only the result field changes;
+             * writing back a partially initialized local struct would
+             * clobber the guest's other fields with stack garbage. */
+            cpu_physical_memory_read(bd_addr, (void*)&bd, sizeof(struct batch_data));
+            bd.result = s->result;
+            cpu_physical_memory_write(bd_addr, (void*)&bd, sizeof(struct batch_data));
+        }
         break;
     default:
         cpu_abort(cpu_single_env, "nand_dev_write: Bad offset %x\n", offset);
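From the guest's point of view, the outcome therefore arrives through the same in-memory block; continuing the hypothetical sketch from above:

```c
/* Continuing the hypothetical guest-side sketch: the device model runs
 * the command synchronously, so bd.result is already updated when the
 * NAND_COMMAND write returns; no extra register read is needed. */
static int nand_read_page(volatile uint32_t *nand_base,
                          uint64_t flash_addr, uint32_t buf_addr,
                          uint32_t size)
{
    uint32_t done = nand_read_batch(nand_base, flash_addr, buf_addr, size);

    /* A short transfer means the request was clipped or rejected;
     * retry or fall back to the per-register path. */
    return done == size ? 0 : -1;
}
```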
@@ -810,6 +840,12 @@
     if(dev->data == NULL)
         goto out_of_memory;
     dev->flags = read_only ? NAND_DEV_FLAG_READ_ONLY : 0;
+#ifdef TARGET_I386
+    /* Batched commands cut the number of MMIO exits per transfer, which
+     * matters most under hardware virtualization, so only x86 targets
+     * advertise the capability. */
+    dev->flags |= NAND_DEV_FLAG_BATCH_CAP;
+#endif
 
     if (initfd >= 0) {
         do {
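Because the capability is compiled in only for x86 targets, a guest should probe before using the batch commands. A sketch, assuming the pre-existing NAND_DEV_FLAGS register in goldfish_nand_reg.h reads back dev->flags (true in the goldfish model, but not shown in this diff):

```c
/* Hedged sketch: probe the per-device flags before using batch ops.
 * NAND_DEV_FLAGS is assumed to be the existing read-back register for
 * dev->flags; fall back to per-register commands when the bit is clear,
 * e.g. on non-x86 builds of the emulator. */
static int nand_has_batch(volatile uint32_t *nand_base)
{
    return (nand_base[NAND_DEV_FLAGS / 4] & NAND_DEV_FLAG_BATCH_CAP) != 0;
}
```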
diff --git a/hw/goldfish_nand_reg.h b/hw/goldfish_nand_reg.h
index ea91461..34d7c44 100644
--- a/hw/goldfish_nand_reg.h
+++ b/hw/goldfish_nand_reg.h
@@ -18,11 +18,24 @@
     NAND_CMD_WRITE,
     NAND_CMD_ERASE,
     NAND_CMD_BLOCK_BAD_GET, // NAND_RESULT is 1 if block is bad, 0 if it is not
-    NAND_CMD_BLOCK_BAD_SET
+    NAND_CMD_BLOCK_BAD_SET,
+    NAND_CMD_READ_BATCH,    // Batch-op extensions: parameters are passed in
+    NAND_CMD_WRITE_BATCH,   // an in-memory batch_data block rather than via
+    NAND_CMD_ERASE_BATCH    // individual registers.
+};
+
+struct batch_data {
+    uint32_t dev;            /* NAND device index */
+    uint32_t addr_low;       /* flash byte offset, low half */
+    uint32_t addr_high;      /* flash byte offset, high half */
+    uint32_t transfer_size;  /* number of bytes to transfer */
+    uint32_t data;           /* guest address of the data buffer */
+    uint32_t result;         /* command result, written back by the device */
 };
 
 enum nand_dev_flags {
-    NAND_DEV_FLAG_READ_ONLY = 0x00000001
+    NAND_DEV_FLAG_READ_ONLY = 0x00000001,
+    NAND_DEV_FLAG_BATCH_CAP = 0x00000002    // device accepts NAND_CMD_*_BATCH
 };
 
 #define NAND_VERSION_CURRENT (1)
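One point worth making explicit: the device copies struct batch_data straight out of (and back into) guest physical memory, so guest and host must agree on its exact layout. With six uint32_t fields there is no padding on any common ABI, but a compile-time check on either side is cheap; a sketch:

```c
/* Sketch: assert the shared-layout assumption at compile time. The
 * negative-array-size trick works in C89; C11 code could use
 * _Static_assert(sizeof(struct batch_data) == 24, "...") instead. */
typedef char batch_data_layout_check[sizeof(struct batch_data) == 24 ? 1 : -1];
```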
@@ -49,6 +62,8 @@
     NAND_TRANSFER_SIZE = 0x04c,
     NAND_ADDR_LOW = 0x050,
     NAND_ADDR_HIGH = 0x054,
+    NAND_BATCH_ADDR_LOW = 0x058,
+    NAND_BATCH_ADDR_HIGH = 0x05c,
 };
 
 #endif