aio: lift iov_iter_init() into aio_setup_..._rw()

the only non-trivial detail is that we do it before rw_verify_area(),
so we'd better cap the length ourselves in the aio_setup_single_vector()
case (for the vectored case rw_copy_check_uvector() will do that for us).

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
diff --git a/fs/aio.c b/fs/aio.c
index 435ca29..7816e8e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1357,7 +1357,8 @@
 				     unsigned long *nr_segs,
 				     size_t *len,
 				     struct iovec **iovec,
-				     bool compat)
+				     bool compat,
+				     struct iov_iter *iter)
 {
 	ssize_t ret;
 
@@ -1378,6 +1379,7 @@
 
 	/* len now reflect bytes instead of segs */
 	*len = ret;
+	iov_iter_init(iter, rw, *iovec, *nr_segs, *len);
 	return 0;
 }
 
@@ -1385,14 +1387,18 @@
 				       int rw, char __user *buf,
 				       unsigned long *nr_segs,
 				       size_t len,
-				       struct iovec *iovec)
+				       struct iovec *iovec,
+				       struct iov_iter *iter)
 {
+	if (len > MAX_RW_COUNT)
+		len = MAX_RW_COUNT;
 	if (unlikely(!access_ok(!rw, buf, len)))
 		return -EFAULT;
 
 	iovec->iov_base = buf;
 	iovec->iov_len = len;
 	*nr_segs = 1;
+	iov_iter_init(iter, rw, iovec, *nr_segs, len);
 	return 0;
 }
 
@@ -1438,10 +1444,10 @@
 
 		if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
 			ret = aio_setup_vectored_rw(req, rw, buf, &nr_segs,
-						&len, &iovec, compat);
+						&len, &iovec, compat, &iter);
 		else
 			ret = aio_setup_single_vector(req, rw, buf, &nr_segs,
-						  len, iovec);
+						  len, iovec, &iter);
 		if (!ret)
 			ret = rw_verify_area(rw, file, &req->ki_pos, len);
 		if (ret < 0) {
@@ -1463,10 +1469,9 @@
 			file_start_write(file);
 
 		if (iter_op) {
-			iov_iter_init(&iter, rw, iovec, nr_segs, len);
 			ret = iter_op(req, &iter);
 		} else {
-			ret = rw_op(req, iovec, nr_segs, req->ki_pos);
+			ret = rw_op(req, iter.iov, iter.nr_segs, req->ki_pos);
 		}
 
 		if (rw == WRITE)