/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * I2C controller driver for Qualcomm Technologies Inc platforms
 */

#define pr_fmt(fmt) "#%d " fmt "\n", __LINE__

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/msm-sps.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/i2c/i2c-msm-v2.h>

#ifdef DEBUG
static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_DBG;
#else
static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_ERR;
#endif

/* Forward declarations */
static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl);
static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
						struct completion *complete);
static int i2c_msm_pm_resume(struct device *dev);
static void i2c_msm_pm_suspend(struct device *dev);
static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
						bool runtime_active);

/* string table for enum i2c_msm_xfer_mode_id */
const char * const i2c_msm_mode_str_tbl[] = {
	"FIFO", "BLOCK", "DMA", "None",
};

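/* FIFO block size in bytes, indexed by the 2-bit block-size fields of QUP_IO_MODES */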
static const u32 i2c_msm_fifo_block_sz_tbl[] = {16, 16, 32, 0};

/* from enum i2c_msm_xfer_mode_id to qup_io_modes register values */
static const u32 i2c_msm_mode_to_reg_tbl[] = {
	0x0, /* map I2C_MSM_XFER_MODE_FIFO -> binary 00 */
	0x1, /* map I2C_MSM_XFER_MODE_BLOCK -> binary 01 */
	0x3  /* map I2C_MSM_XFER_MODE_DMA -> binary 11 */
};

const char *i2c_msm_err_str_table[] = {
	[I2C_MSM_NO_ERR] = "NONE",
	[I2C_MSM_ERR_NACK] = "NACK: slave not responding, ensure it is powered",
	[I2C_MSM_ERR_ARB_LOST] = "ARB_LOST",
	[I2C_MSM_ERR_BUS_ERR] = "BUS ERROR: noisy bus/unexpected start/stop tag",
	[I2C_MSM_ERR_TIMEOUT] = "TIMEOUT_ERROR",
	[I2C_MSM_ERR_CORE_CLK] = "CLOCK OFF: Check Core Clock",
	[I2C_MSM_ERR_OVR_UNDR_RUN] = "OVER_UNDER_RUN_ERROR",
};

static void i2c_msm_dbg_dump_diag(struct i2c_msm_ctrl *ctrl,
				bool use_param_vals, u32 status, u32 qup_op)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	const char *str = i2c_msm_err_str_table[xfer->err];
	char buf[I2C_MSM_REG_2_STR_BUF_SZ];

	if (!use_param_vals) {
		void __iomem *base = ctrl->rsrcs.base;

		status = readl_relaxed(base + QUP_I2C_STATUS);
		qup_op = readl_relaxed(base + QUP_OPERATIONAL);
	}

	if (xfer->err == I2C_MSM_ERR_TIMEOUT) {
		/*
		 * if we are not the bus master or SDA/SCL is low then it may be
		 * that slave is pulling the lines low. Otherwise it is likely a
		 * GPIO issue
		 */
		if (!(status & QUP_BUS_MASTER))
			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
				"%s(val:%dmsec) misconfigured GPIO or slave pulling bus line(s) low\n",
				str, jiffies_to_msecs(xfer->timeout));
		else
			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
				"%s(val:%dmsec)", str,
				jiffies_to_msecs(xfer->timeout));

		str = buf;
	}

	/* dump xfer details */
	dev_err(ctrl->dev,
		"%s: msgs(n:%d cur:%d %s) bc(rx:%zu tx:%zu) mode:%s slv_addr:0x%0x MSTR_STS:0x%08x OPER:0x%08x\n",
		str, xfer->msg_cnt, xfer->cur_buf.msg_idx,
		xfer->cur_buf.is_rx ? "rx" : "tx", xfer->rx_cnt, xfer->tx_cnt,
		i2c_msm_mode_str_tbl[xfer->mode_id], xfer->msgs->addr,
		status, qup_op);
}

static u32 i2c_msm_reg_io_modes_out_blk_sz(u32 qup_io_modes)
{
	return i2c_msm_fifo_block_sz_tbl[qup_io_modes & 0x3];
}

static u32 i2c_msm_reg_io_modes_in_blk_sz(u32 qup_io_modes)
{
	return i2c_msm_fifo_block_sz_tbl[BITS_AT(qup_io_modes, 5, 2)];
}

static const u32 i2c_msm_fifo_sz_table[] = {2, 4, 8, 16};

static void i2c_msm_qup_fifo_calc_size(struct i2c_msm_ctrl *ctrl)
{
	u32 reg_data, output_fifo_size, input_fifo_size;
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
	/* Guard to read the FIFO size only once. It is hardwired and never changes */
	if (fifo->input_fifo_sz && fifo->output_fifo_sz)
		return;

	reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
	output_fifo_size = BITS_AT(reg_data, 2, 2);
	input_fifo_size = BITS_AT(reg_data, 7, 2);

	fifo->input_fifo_sz = i2c_msm_reg_io_modes_in_blk_sz(reg_data) *
					i2c_msm_fifo_sz_table[input_fifo_size];
	fifo->output_fifo_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data) *
					i2c_msm_fifo_sz_table[output_fifo_size];

	i2c_msm_dbg(ctrl, MSM_PROF, "QUP input-sz:%zu, output-sz:%zu",
			fifo->input_fifo_sz, fifo->output_fifo_sz);

}

/*
 * i2c_msm_tag_byte: accessor for tag as four bytes array
 */
static u8 *i2c_msm_tag_byte(struct i2c_msm_tag *tag, int byte_n)
{
	return ((u8 *)tag) + byte_n;
}

/*
 * i2c_msm_buf_to_ptr: translates a xfer buf to a pointer into the i2c_msg data
 */
static u8 *i2c_msm_buf_to_ptr(struct i2c_msm_xfer_buf *buf)
{
	struct i2c_msm_xfer *xfer =
				container_of(buf, struct i2c_msm_xfer, cur_buf);
	struct i2c_msg *msg = xfer->msgs + buf->msg_idx;

	return msg->buf + buf->byte_idx;
}

/*
 * tag_lookup_table[is_new_addr][is_last][is_rx]
 * @is_new_addr Is start tag required? (which requires two more bytes.)
 * @is_last     Use the XXXXX_N_STOP tag variant
 * @is_rx       READ/WRITE
 */
static const struct i2c_msm_tag tag_lookup_table[2][2][2] = {
	{{{QUP_TAG2_DATA_WRITE, 2},
	  {QUP_TAG2_DATA_READ, 2} },
	/* last buffer */
	 {{QUP_TAG2_DATA_WRITE_N_STOP, 2},
	  {QUP_TAG2_DATA_READ_N_STOP, 2} } },
	/* new addr */
	{{{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE << 16), 4},
	  {QUP_TAG2_START | (QUP_TAG2_DATA_READ << 16), 4} },
	/* last buffer + new addr */
	 {{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE_N_STOP << 16), 4},
	  {QUP_TAG2_START | (QUP_TAG2_DATA_READ_N_STOP << 16), 4} } },
};

/*
 * i2c_msm_tag_create: format a qup tag ver2
 */
static struct i2c_msm_tag i2c_msm_tag_create(bool is_new_addr, bool is_last_buf,
					bool is_rx, u8 buf_len, u8 slave_addr)
{
	struct i2c_msm_tag tag;
	/* Normalize booleans to 1 or 0 */
	is_new_addr = is_new_addr ? 1 : 0;
	is_last_buf = is_last_buf ? 1 : 0;
	is_rx = is_rx ? 1 : 0;

	tag = tag_lookup_table[is_new_addr][is_last_buf][is_rx];
	/* fill in the non-const value: the address and the length */
	if (tag.len == I2C_MSM_TAG2_MAX_LEN) {
		*i2c_msm_tag_byte(&tag, 1) = slave_addr;
		*i2c_msm_tag_byte(&tag, 3) = buf_len;
	} else {
		*i2c_msm_tag_byte(&tag, 1) = buf_len;
	}

	return tag;
}

static int
i2c_msm_qup_state_wait_valid(struct i2c_msm_ctrl *ctrl,
			enum i2c_msm_qup_state state, bool only_valid)
{
	u32 status;
	void __iomem *base = ctrl->rsrcs.base;
	int ret = 0;
	int read_cnt = 0;

	do {
		status = readl_relaxed(base + QUP_STATE);
		++read_cnt;

		/*
		 * If only valid bit needs to be checked, requested state is
		 * 'don't care'
		 */
		if (status & QUP_STATE_VALID) {
			if (only_valid)
				goto poll_valid_end;
			else if ((state & QUP_I2C_MAST_GEN) &&
					(status & QUP_I2C_MAST_GEN))
				goto poll_valid_end;
			else if ((status & QUP_STATE_MASK) == state)
				goto poll_valid_end;
		}

		/*
		 * Sleep 1-1.5 ms after every 100 iterations, and give up once
		 * the iteration count crosses 1500; this allows roughly
		 * 10-15 msec for the core to reach a valid state.
		 */
		if (!(read_cnt % 100))
			usleep_range(1000, 1500);
	} while (read_cnt <= 1500);

	ret = -ETIMEDOUT;
	dev_err(ctrl->dev,
		"error timeout on polling for valid state. check core_clk\n");

poll_valid_end:
	if (!only_valid)
		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_VALID_END,
				/* aggregate ret and state */
				(((-ret) & 0xff) | ((state & 0xf) << 16)),
				read_cnt, status);

	return ret;
}

static int i2c_msm_qup_state_set(struct i2c_msm_ctrl *ctrl,
						enum i2c_msm_qup_state state)
{
	if (i2c_msm_qup_state_wait_valid(ctrl, 0, true))
		return -EIO;

	writel_relaxed(state, ctrl->rsrcs.base + QUP_STATE);

	if (i2c_msm_qup_state_wait_valid(ctrl, state, false))
		return -EIO;

	return 0;
}

static int i2c_msm_qup_sw_reset(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	writel_relaxed(1, ctrl->rsrcs.base + QUP_SW_RESET);
	/*
	 * Ensure that the QUP reset-state write completes before waiting for
	 * the reset state to become valid.
	 */
	wmb();
	ret = i2c_msm_qup_state_wait_valid(ctrl, QUP_STATE_RESET, false);
	if (ret) {
		if (atomic_read(&ctrl->xfer.is_active))
			ctrl->xfer.err = I2C_MSM_ERR_CORE_CLK;
		dev_err(ctrl->dev, "error on issuing QUP software-reset\n");
	}
	return ret;
}

/*
 * i2c_msm_qup_xfer_init_reset_state: setup QUP registers for the next run state
 * @pre QUP must be in reset state.
 * @pre xfer->mode_id is set to the chosen transfer state
 * @post update values in QUP_MX_*_COUNT, QUP_CONFIG, QUP_IO_MODES,
 *       and QUP_OPERATIONAL_MASK registers
 */
static void
i2c_msm_qup_xfer_init_reset_state(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	void __iomem * const base = ctrl->rsrcs.base;
	u32 mx_rd_cnt = 0;
	u32 mx_wr_cnt = 0;
	u32 mx_in_cnt = 0;
	u32 mx_out_cnt = 0;
	u32 no_input = 0;
	u32 no_output = 0;
	u32 input_mode = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 12;
	u32 output_mode = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 10;
	u32 config_reg;
	u32 io_modes_reg;
	u32 op_mask;
	u32 rx_cnt = 0;
	u32 tx_cnt = 0;
	/*
	 * DMA mode:
	 * 1. QUP_MX_*_COUNT must be zero in all cases.
	 * 2. both QUP_NO_INPUT and QUP_NO_OUTPUT are unset.
	 * FIFO mode:
	 * 1. QUP_MX_INPUT_COUNT and QUP_MX_OUTPUT_COUNT are zero
	 * 2. QUP_MX_READ_COUNT and QUP_MX_WRITE_COUNT reflect true count
	 * 3. QUP_NO_INPUT and QUP_NO_OUTPUT are set according to counts
	 */
	if (xfer->mode_id != I2C_MSM_XFER_MODE_DMA) {
		rx_cnt = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
		tx_cnt = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
		no_input = rx_cnt ? 0 : QUP_NO_INPUT;
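		/* QUP_NO_OUTPUT is left clear: tx always carries at least the tag bytes */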

		switch (xfer->mode_id) {
		case I2C_MSM_XFER_MODE_FIFO:
			mx_rd_cnt = rx_cnt;
			mx_wr_cnt = tx_cnt;
			break;
		case I2C_MSM_XFER_MODE_BLOCK:
			mx_in_cnt = rx_cnt;
			mx_out_cnt = tx_cnt;
			break;
		default:
			break;
		}
	}

	/* init DMA/BLOCK modes counter */
	writel_relaxed(mx_in_cnt, base + QUP_MX_INPUT_COUNT);
	writel_relaxed(mx_out_cnt, base + QUP_MX_OUTPUT_COUNT);

	/* init FIFO mode counter */
	writel_relaxed(mx_rd_cnt, base + QUP_MX_READ_COUNT);
	writel_relaxed(mx_wr_cnt, base + QUP_MX_WRITE_COUNT);

	/*
	 * Set QUP mini-core to I2C tags ver-2
	 * sets NO_INPUT / NO_OUTPUT as needed
	 */
	config_reg = readl_relaxed(base + QUP_CONFIG);
	config_reg &=
	      ~(QUP_NO_INPUT | QUP_NO_OUTPUT | QUP_N_MASK | QUP_MINI_CORE_MASK);
	config_reg |= (no_input | no_output | QUP_N_VAL |
							QUP_MINI_CORE_I2C_VAL);
	writel_relaxed(config_reg, base + QUP_CONFIG);

	/*
	 * Turns-on packing/unpacking
	 * sets NO_INPUT / NO_OUTPUT as needed
	 */
	io_modes_reg = readl_relaxed(base + QUP_IO_MODES);
	io_modes_reg &=
	   ~(QUP_INPUT_MODE | QUP_OUTPUT_MODE | QUP_PACK_EN | QUP_UNPACK_EN
	     | QUP_OUTPUT_BIT_SHIFT_EN);
	io_modes_reg |=
	   (input_mode | output_mode | QUP_PACK_EN | QUP_UNPACK_EN);
	writel_relaxed(io_modes_reg, base + QUP_IO_MODES);

	/*
	 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO status
	 * change on DMA-mode transfers
	 */
	op_mask = (xfer->mode_id == I2C_MSM_XFER_MODE_DMA) ?
		    (QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK) : 0;
	writel_relaxed(op_mask, base + QUP_OPERATIONAL_MASK);
	/* Ensure that QUP configuration is written before leaving this func */
	wmb();
}

/*
 * i2c_msm_clk_div_fld:
 * @clk_freq_out output clock frequency
 * @fs_div fs divider value
 * @ht_div high time divider value
 */
struct i2c_msm_clk_div_fld {
	u32 clk_freq_out;
	u8  fs_div;
	u8  ht_div;
};

/*
 * divider values as per HW Designers
 */
static struct i2c_msm_clk_div_fld i2c_msm_clk_div_map[] = {
	{KHz(100), 124, 62},
	{KHz(400),  28, 14},
	{KHz(1000),  8,  5},
};

/*
 * @return zero on success
 * @fs_div when zero use value from table above, otherwise use given value
 * @ht_div when zero use value from table above, otherwise use given value
 *
 * Format the value to be configured into the clock divider register. This
 * register is configured every time core is moved from reset to run state.
 */
static int i2c_msm_set_mstr_clk_ctl(struct i2c_msm_ctrl *ctrl, int fs_div,
			int ht_div, int noise_rjct_scl, int noise_rjct_sda)
{
	int ret = 0;
	int i;
	u32 reg_val = 0;
	struct i2c_msm_clk_div_fld *itr = i2c_msm_clk_div_map;

	/* set noise rejection values for scl and sda */
	reg_val = I2C_MSM_SCL_NOISE_REJECTION(reg_val, noise_rjct_scl);
	reg_val = I2C_MSM_SDA_NOISE_REJECTION(reg_val, noise_rjct_sda);

	/*
	 * find matching freq and set divider values unless they are forced
	 * from the parameter list
	 */
	for (i = 0; i < ARRAY_SIZE(i2c_msm_clk_div_map); ++i, ++itr) {
		if (ctrl->rsrcs.clk_freq_out == itr->clk_freq_out) {
			if (!fs_div)
				fs_div = itr->fs_div;
			if (!ht_div)
				ht_div = itr->ht_div;
			break;
		}
	}
	if (!fs_div) {
		dev_err(ctrl->dev,
			"error: for non-standard clock freq:%dKHz the clk divider value fs_div must be supplied by the client through the device tree\n",
			(ctrl->rsrcs.clk_freq_out / 1000));
		return -EINVAL;
	}

	/* format values in clk-ctl cache: fs_div in bits [7:0], ht_div in bits [23:16] */
	ctrl->mstr_clk_ctl = (reg_val & (~0xff07ff)) | ((ht_div & 0xff) << 16)
							| (fs_div & 0xff);

	return ret;
}

/*
 * i2c_msm_qup_xfer_init_run_state: set qup regs which must be set *after* reset
 */
static void i2c_msm_qup_xfer_init_run_state(struct i2c_msm_ctrl *ctrl)
{
	void __iomem *base = ctrl->rsrcs.base;

	writel_relaxed(ctrl->mstr_clk_ctl, base + QUP_I2C_MASTER_CLK_CTL);

	/* Ensure that QUP configuration is written before leaving this func */
	wmb();

	if (ctrl->dbgfs.dbg_lvl == MSM_DBG) {
		dev_info(ctrl->dev,
			"QUP state after programming for next transfers\n");
		i2c_msm_dbg_qup_reg_dump(ctrl);
	}
}

static void i2c_msm_fifo_wr_word(struct i2c_msm_ctrl *ctrl, u32 data)
{
	writel_relaxed(data, ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-FIFO:0x%08x", data);
}

static u32 i2c_msm_fifo_rd_word(struct i2c_msm_ctrl *ctrl, u32 *data)
{
	u32 val;

	val = readl_relaxed(ctrl->rsrcs.base + QUP_IN_FIFO_BASE);
	i2c_msm_dbg(ctrl, MSM_DBG, "IN-FIFO :0x%08x", val);

	if (data)
		*data = val;

	return val;
}

/*
 * i2c_msm_fifo_wr_buf_flush: write out a partially filled (1 to 3 byte) word to the FIFO
 */
static void i2c_msm_fifo_wr_buf_flush(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
	u32 *word;

	if (!fifo->out_buf_idx)
		return;

	word = (u32 *) fifo->out_buf;
	i2c_msm_fifo_wr_word(ctrl, *word);
	fifo->out_buf_idx = 0;
	*word = 0;
}

/*
 * i2c_msm_fifo_wr_buf:
 *
 * @len buf size (in bytes)
 * @return number of bytes from buf which have been processed (written to
 *         FIFO or kept in out buffer and will be written later)
 */
static size_t
i2c_msm_fifo_wr_buf(struct i2c_msm_ctrl *ctrl, u8 *buf, size_t len)
{
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
	int i;

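	/* pack bytes into a 4-byte staging word; the OUT FIFO accepts only whole 32-bit words */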
	for (i = 0; i < len; ++i, ++buf) {

		fifo->out_buf[fifo->out_buf_idx] = *buf;
		++fifo->out_buf_idx;

		if (fifo->out_buf_idx == 4) {
			u32 *word = (u32 *) fifo->out_buf;

			i2c_msm_fifo_wr_word(ctrl, *word);
			fifo->out_buf_idx = 0;
			*word = 0;
		}
	}
	return i;
}

static size_t i2c_msm_fifo_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	size_t len = 0;

	if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
		char str[I2C_MSM_REG_2_STR_BUF_SZ];

		dev_info(ctrl->dev, "tag.val:0x%llx tag.len:%d %s\n",
			buf->out_tag.val, buf->out_tag.len,
			i2c_msm_dbg_tag_to_str(&buf->out_tag, str,
								sizeof(str)));
	}

	if (buf->out_tag.len) {
		len = i2c_msm_fifo_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
							buf->out_tag.len);

		if (len < buf->out_tag.len)
			goto done;

		buf->out_tag = (struct i2c_msm_tag) {0};
	}
done:
	return len;
}

/*
 * i2c_msm_fifo_read: reads up to fifo size into user's buf
 */
static void i2c_msm_fifo_read_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msg *msg = ctrl->xfer.msgs + buf->msg_idx;
	u8 *p_tag_val = (u8 *) &buf->in_tag.val;
	int buf_need_bc = msg->len - buf->byte_idx;
	u8 word[4];
	int copy_bc;
	int word_idx;
	int word_bc;

	if (!buf->is_rx)
		return;

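	/* drain the IN FIFO word by word; each word may carry tag bytes, payload bytes, or both */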
	while (buf_need_bc || buf->in_tag.len) {
		i2c_msm_fifo_rd_word(ctrl, (u32 *) word);
		word_bc = sizeof(word);
		word_idx = 0;

		/*
		 * copy bytes from fifo word to tag.
		 * @note buf->in_tag.len (max 2 bytes) < word_bc (4 bytes)
		 */
		if (buf->in_tag.len) {
			copy_bc = min_t(int, word_bc, buf->in_tag.len);

			memcpy(p_tag_val + buf->in_tag.len, word, copy_bc);

			word_idx += copy_bc;
			word_bc -= copy_bc;
			buf->in_tag.len -= copy_bc;

			if ((ctrl->dbgfs.dbg_lvl >= MSM_DBG) &&
							!buf->in_tag.len) {
				char str[64];

				dev_info(ctrl->dev, "%s\n",
					i2c_msm_dbg_tag_to_str(&buf->in_tag,
							str, sizeof(str)));
			}
		}

		/* copy bytes from fifo word to user's buffer */
		copy_bc = min_t(int, word_bc, buf_need_bc);
		memcpy(msg->buf + buf->byte_idx, word + word_idx, copy_bc);

		buf->byte_idx += copy_bc;
		buf_need_bc -= copy_bc;
	}
}

/*
 * i2c_msm_fifo_write_xfer_buf: write xfer.cur_buf (user's-buf + tag) to fifo
 */
static void i2c_msm_fifo_write_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	size_t len;
	size_t tag_len;

	tag_len = buf->out_tag.len;
	len = i2c_msm_fifo_xfer_wr_tag(ctrl);
	if (len < tag_len) {
		dev_err(ctrl->dev, "error on writing tag to out FIFO\n");
		return;
	}

	if (!buf->is_rx) {
		if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
			char str[I2C_MSM_REG_2_STR_BUF_SZ];
			int offset = 0;
			u8 *p = i2c_msm_buf_to_ptr(buf);
			int i;

			for (i = 0; i < len; ++i, ++p)
				offset += snprintf(str + offset,
						   sizeof(str) - offset,
						   "0x%x ", *p);
			dev_info(ctrl->dev, "data: %s\n", str);
		}

		len = i2c_msm_fifo_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf),
						buf->len);
		if (len < buf->len)
			dev_err(ctrl->dev, "error on xfering buf with FIFO\n");
	}
}

/*
 * i2c_msm_fifo_xfer_process:
 *
 * @pre transfer size is less than or equal to fifo size.
 * @pre QUP in run state/pause
 * @return zero on success
 */
static int i2c_msm_fifo_xfer_process(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf first_buf = ctrl->xfer.cur_buf;
	int ret;

	/* load fifo while in pause state to avoid race conditions */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
	if (ret < 0)
		return ret;

	/* write all that goes to output fifo */
	while (i2c_msm_xfer_next_buf(ctrl))
		i2c_msm_fifo_write_xfer_buf(ctrl);

	i2c_msm_fifo_wr_buf_flush(ctrl);

	ctrl->xfer.cur_buf = first_buf;

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	/* wait for input done interrupt */
	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
	if (ret < 0)
		return ret;

	/* read all from input fifo */
	while (i2c_msm_xfer_next_buf(ctrl))
		i2c_msm_fifo_read_xfer_buf(ctrl);

	return 0;
}

/*
 * i2c_msm_fifo_xfer: process transfer using fifo mode
 */
static int i2c_msm_fifo_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	i2c_msm_dbg(ctrl, MSM_DBG, "Starting FIFO transfer");

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		return ret;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	ret = i2c_msm_fifo_xfer_process(ctrl);

	return ret;
}

/*
 * i2c_msm_blk_init_struct: Allocate memory and initialize blk structure
 *
 * @return 0 on success or error code
 */
static int i2c_msm_blk_init_struct(struct i2c_msm_ctrl *ctrl)
{
	u32 reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
	int ret;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;

	blk->in_blk_sz = i2c_msm_reg_io_modes_in_blk_sz(reg_data);
	blk->out_blk_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data);

	blk->tx_cache = kmalloc(blk->out_blk_sz, GFP_KERNEL);
	if (!blk->tx_cache) {
		ret = -ENOMEM;
		goto out_buf_err;
	}

	blk->rx_cache = kmalloc(blk->in_blk_sz, GFP_KERNEL);
	if (!blk->rx_cache) {
		dev_err(ctrl->dev,
			"error on allocating memory for block rx_cache. malloc(size:%zu)\n",
			blk->in_blk_sz);
		ret = -ENOMEM;
		goto in_buf_err;
	}

	blk->is_init = true;
	return 0;

in_buf_err:
	kfree(blk->tx_cache);
out_buf_err:

	return ret;
}

/*
 * i2c_msm_blk_wr_flush: flushes internal cached block to FIFO
 *
 * @return 0 on success or error code
 */
static int i2c_msm_blk_wr_flush(struct i2c_msm_ctrl *ctrl)
{
	int byte_num;
	int ret = 0;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	u32 *buf_u32_ptr;

	if (!blk->tx_cache_idx)
		return 0;

	/* if no blocks available wait for interrupt */
	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_tx_blk);
	if (ret)
		return ret;

	/*
	 * pause the controller until we finish loading the block in order to
	 * avoid race conditions
	 */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
	if (ret < 0)
		return ret;
	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-BLK:%*phC", blk->tx_cache_idx,
							blk->tx_cache);

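	/* copy the cached block to the OUT FIFO one 32-bit word at a time, zeroing the cache as we go */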
	for (byte_num = 0; byte_num < blk->tx_cache_idx;
						byte_num += sizeof(u32)) {
		buf_u32_ptr = (u32 *) (blk->tx_cache + byte_num);
		writel_relaxed(*buf_u32_ptr,
					ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
		*buf_u32_ptr = 0;
	}

	/* now cache is empty */
	blk->tx_cache_idx = 0;
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	return ret;
}

/*
 * i2c_msm_blk_wr_buf:
 *
 * @len buf size (in bytes)
 * @return number of bytes from buf which have been processed (written to
 *         FIFO or kept in out buffer and will be written later)
 */
static int
i2c_msm_blk_wr_buf(struct i2c_msm_ctrl *ctrl, const u8 *buf, int len)
{
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	int byte_num;
	int ret = 0;

	for (byte_num = 0; byte_num < len; ++byte_num, ++buf) {
		blk->tx_cache[blk->tx_cache_idx] = *buf;
		++blk->tx_cache_idx;

		/* flush cached buffer to HW FIFO when full */
		if (blk->tx_cache_idx == blk->out_blk_sz) {
			ret = i2c_msm_blk_wr_flush(ctrl);
			if (ret)
				return ret;
		}
	}
	return byte_num;
}

/*
 * i2c_msm_blk_xfer_wr_tag: buffered writing the tag of current buf
 * @return zero on success
 */
static int i2c_msm_blk_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	int len = 0;

	if (!buf->out_tag.len)
		return 0;

	len = i2c_msm_blk_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
							buf->out_tag.len);
	if (len != buf->out_tag.len)
		return -EFAULT;

	buf->out_tag = (struct i2c_msm_tag) {0};
	return 0;
}

/*
 * i2c_msm_blk_wr_xfer_buf: writes ctrl->xfer.cur_buf to HW
 *
 * @return zero on success
 */
static int i2c_msm_blk_wr_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	int len;
	int ret;

	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
	if (ret)
		return ret;

	len = i2c_msm_blk_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf), buf->len);
	if (len < buf->len)
		return -EFAULT;

	buf->byte_idx += len;
	return 0;
}

/*
 * i2c_msm_blk_rd_blk: read a block from HW FIFO to internal cache
 *
 * @return number of bytes read or negative error value
 * @need_bc number of bytes that we need
 *
 * uses internal counter to keep track of number of available blocks. When
 * zero, waits for interrupt.
 */
static int i2c_msm_blk_rd_blk(struct i2c_msm_ctrl *ctrl, int need_bc)
{
	int byte_num;
	int ret = 0;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	u32 *cache_ptr = (u32 *) blk->rx_cache;
	int read_bc = min_t(int, blk->in_blk_sz, need_bc);

	/* wait for block available interrupt */
	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_rx_blk);
	if (ret)
		return ret;

	/* Read block from HW to cache */
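	/* only the first read_bc bytes are fetched; loop iterations past that point are no-ops */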
	for (byte_num = 0; byte_num < blk->in_blk_sz;
					byte_num += sizeof(u32)) {
		if (byte_num < read_bc) {
			*cache_ptr = readl_relaxed(ctrl->rsrcs.base +
							QUP_IN_FIFO_BASE);
			++cache_ptr;
		}
	}
	blk->rx_cache_idx = 0;
	return read_bc;
}

/*
 * i2c_msm_blk_rd_xfer_buf: fill in ctrl->xfer.cur_buf from HW
 *
 * @return zero on success
 */
static int i2c_msm_blk_rd_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msg *msg = ctrl->xfer.msgs + buf->msg_idx;
	int copy_bc;         /* number of bytes to copy to user's buffer */
	int cache_avail_bc;
	int ret = 0;

	/* write tag to out FIFO */
	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
	if (ret)
		return ret;
	i2c_msm_blk_wr_flush(ctrl);

	while (buf->len || buf->in_tag.len) {
		cache_avail_bc = i2c_msm_blk_rd_blk(ctrl,
						buf->len + buf->in_tag.len);

		i2c_msm_dbg(ctrl, MSM_DBG, "IN-BLK:%*phC\n", cache_avail_bc,
					blk->rx_cache + blk->rx_cache_idx);

		if (cache_avail_bc < 0)
			return cache_avail_bc;

		/* discard tag from input FIFO */
		if (buf->in_tag.len) {
			int discard_bc = min_t(int, cache_avail_bc,
							buf->in_tag.len);
			blk->rx_cache_idx += discard_bc;
			buf->in_tag.len -= discard_bc;
			cache_avail_bc -= discard_bc;
		}

		/* copy bytes from cached block to user's buffer */
		copy_bc = min_t(int, cache_avail_bc, buf->len);
		memcpy(msg->buf + buf->byte_idx,
			blk->rx_cache + blk->rx_cache_idx, copy_bc);

		blk->rx_cache_idx += copy_bc;
		buf->len -= copy_bc;
		buf->byte_idx += copy_bc;
	}
	return ret;
}

/*
 * i2c_msm_blk_xfer: process transfer using block mode
 */
static int i2c_msm_blk_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret = 0;
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;

	if (!blk->is_init) {
		ret = i2c_msm_blk_init_struct(ctrl);
		if (!blk->is_init)
			return ret;
	}

	init_completion(&blk->wait_rx_blk);
	init_completion(&blk->wait_tx_blk);

	/* tx_cnt > 0 always */
	blk->complete_mask = QUP_MAX_OUTPUT_DONE_FLAG;
	if (ctrl->xfer.rx_cnt)
		blk->complete_mask |= QUP_MAX_INPUT_DONE_FLAG;

	/* initialize block mode for new transfer */
	blk->tx_cache_idx = 0;
	blk->rx_cache_idx = 0;

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		return ret;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	while (i2c_msm_xfer_next_buf(ctrl)) {
		if (buf->is_rx) {
			ret = i2c_msm_blk_rd_xfer_buf(ctrl);
			if (ret)
				return ret;
			/*
			 * SW workaround to wait for extra interrupt from
			 * hardware for last block in block mode for read
			 */
			if (buf->is_last) {
				ret = i2c_msm_xfer_wait_for_completion(ctrl,
							&blk->wait_rx_blk);
				if (!ret)
					complete(&ctrl->xfer.complete);
			}
		} else {
			ret = i2c_msm_blk_wr_xfer_buf(ctrl);
			if (ret)
				return ret;
		}
	}
	i2c_msm_blk_wr_flush(ctrl);
	return i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
}

/*
 * i2c_msm_dma_xfer_prepare: map DMA buffers, and create tags.
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_xfer_prepare(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msm_dma_chan *tx = &dma->chan[I2C_MSM_DMA_TX];
	struct i2c_msm_dma_chan *rx = &dma->chan[I2C_MSM_DMA_RX];
	struct i2c_msm_dma_buf *dma_buf;
	int rem_buf_cnt = I2C_MSM_DMA_DESC_ARR_SIZ;
	struct i2c_msg *cur_msg;
	enum dma_data_direction buf_dma_dirctn;
	struct i2c_msm_dma_mem data;
	u8 *tag_arr_itr_vrtl_addr;
	dma_addr_t tag_arr_itr_phy_addr;

	tx->desc_cnt_cur = 0;
	rx->desc_cnt_cur = 0;
	dma->buf_arr_cnt = 0;
	dma_buf = dma->buf_arr;
	tag_arr_itr_vrtl_addr = ((u8 *) dma->tag_arr.vrtl_addr);
	tag_arr_itr_phy_addr = dma->tag_arr.phy_addr;

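	/* walk the transfer bufs: dma-map each user buffer and stage its tag in the DMA tag array */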
	for (; i2c_msm_xfer_next_buf(ctrl) && rem_buf_cnt;
		++dma_buf,
		tag_arr_itr_phy_addr += sizeof(dma_addr_t),
		tag_arr_itr_vrtl_addr += sizeof(dma_addr_t)) {

		/* dma-map the client's message */
		cur_msg = ctrl->xfer.msgs + buf->msg_idx;
		data.vrtl_addr = cur_msg->buf + buf->byte_idx;
		if (buf->is_rx) {
			buf_dma_dirctn = DMA_FROM_DEVICE;
			rx->desc_cnt_cur += 2; /* msg + tag */
			tx->desc_cnt_cur += 1; /* tag */
		} else {
			buf_dma_dirctn = DMA_TO_DEVICE;
			tx->desc_cnt_cur += 2; /* msg + tag */
		}

		/* for last buffer in a transfer msg */
		if (buf->is_last) {
			/* add ovrhead byte cnt for tags specific to DMA mode */
			ctrl->xfer.rx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags */
			ctrl->xfer.tx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags */

			/* increment rx desc cnt to read off tags and
			 * increment tx desc cnt to queue EOT+FLUSH_STOP tags
			 */
			tx->desc_cnt_cur++;
			rx->desc_cnt_cur++;
		}

		if ((rx->desc_cnt_cur >= I2C_MSM_DMA_RX_SZ) ||
		    (tx->desc_cnt_cur >= I2C_MSM_DMA_TX_SZ))
			return -ENOMEM;

		data.phy_addr = dma_map_single(ctrl->dev, data.vrtl_addr,
						buf->len, buf_dma_dirctn);

		if (dma_mapping_error(ctrl->dev, data.phy_addr)) {
			dev_err(ctrl->dev,
			  "error DMA mapping DMA buffers, err:%lld buf_vrtl:0x%p data_len:%d dma_dir:%s\n",
			  (u64) data.phy_addr, data.vrtl_addr, buf->len,
			  ((buf_dma_dirctn == DMA_FROM_DEVICE)
				? "DMA_FROM_DEVICE" : "DMA_TO_DEVICE"));
			return -EFAULT;
		}

		/* copy 8 bytes. Only tag.len bytes will be used */
		*((u64 *)tag_arr_itr_vrtl_addr) = buf->out_tag.val;

		i2c_msm_dbg(ctrl, MSM_DBG,
			"vrtl:0x%p phy:0x%llx val:0x%llx sizeof(dma_addr_t):%zu",
			tag_arr_itr_vrtl_addr, (u64) tag_arr_itr_phy_addr,
			*((u64 *)tag_arr_itr_vrtl_addr), sizeof(dma_addr_t));

		/*
		 * create dma buf, in the dma buf arr, based on the buf created
		 * by i2c_msm_xfer_next_buf()
		 */
		*dma_buf = (struct i2c_msm_dma_buf) {
			.ptr      = data,
			.len      = buf->len,
			.dma_dir  = buf_dma_dirctn,
			.is_rx    = buf->is_rx,
			.is_last  = buf->is_last,
			.tag      = (struct i2c_msm_dma_tag) {
				.buf = tag_arr_itr_phy_addr,
				.len = buf->out_tag.len,
			},
		};
		++dma->buf_arr_cnt;
		--rem_buf_cnt;
	}
	return 0;
}

/*
 * i2c_msm_dma_xfer_unprepare: DMA-unmap the buffers.
 */
static void i2c_msm_dma_xfer_unprepare(struct i2c_msm_ctrl *ctrl)
{
	int i;
	struct i2c_msm_dma_buf *buf_itr = ctrl->xfer.dma.buf_arr;

	for (i = 0; i < ctrl->xfer.dma.buf_arr_cnt; ++i, ++buf_itr)
		dma_unmap_single(ctrl->dev, buf_itr->ptr.phy_addr, buf_itr->len,
							buf_itr->dma_dir);
}

static void i2c_msm_dma_callback_tx_complete(void *dma_async_param)
{
	struct i2c_msm_ctrl *ctrl = dma_async_param;

	complete(&ctrl->xfer.complete);
}

static void i2c_msm_dma_callback_rx_complete(void *dma_async_param)
{
	struct i2c_msm_ctrl *ctrl = dma_async_param;

	complete(&ctrl->xfer.rx_complete);
}

/*
 * i2c_msm_dma_xfer_process: Queue transfers to DMA
 * @pre 1) QUP is in run state. 2) i2c_msm_dma_xfer_prepare() was called.
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	struct i2c_msm_dma_chan *tx = &dma->chan[I2C_MSM_DMA_TX];
	struct i2c_msm_dma_chan *rx = &dma->chan[I2C_MSM_DMA_RX];
	struct scatterlist *sg_rx = NULL;
	struct scatterlist *sg_rx_itr = NULL;
	struct scatterlist *sg_tx = NULL;
	struct scatterlist *sg_tx_itr = NULL;
	struct dma_async_tx_descriptor *dma_desc_rx;
	struct dma_async_tx_descriptor *dma_desc_tx;
	struct i2c_msm_dma_buf *buf_itr;
	int i;
	int ret = 0;

	i2c_msm_dbg(ctrl, MSM_DBG, "Going to enqueue %zu buffers in DMA",
							dma->buf_arr_cnt);

	/* Set the QUP State to pause while DMA completes the txn */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
	if (ret) {
		dev_err(ctrl->dev, "transition to pause state failed before DMA transaction :%d\n",
									ret);
		return ret;
	}

	sg_tx = kcalloc(tx->desc_cnt_cur, sizeof(struct scatterlist),
								GFP_KERNEL);
	if (!sg_tx) {
		ret = -ENOMEM;
		goto dma_xfer_end;
	}
	sg_init_table(sg_tx, tx->desc_cnt_cur);
	sg_tx_itr = sg_tx;

	sg_rx = kcalloc(rx->desc_cnt_cur, sizeof(struct scatterlist),
								GFP_KERNEL);
	if (!sg_rx) {
		ret = -ENOMEM;
		goto dma_xfer_end;
	}
	sg_init_table(sg_rx, rx->desc_cnt_cur);
	sg_rx_itr = sg_rx;

	buf_itr = dma->buf_arr;

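	/* build the scatter-gather lists: one tag descriptor plus one data descriptor per buffer */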
	for (i = 0; i < dma->buf_arr_cnt; ++i, ++buf_itr) {
		/* Queue tag */
		sg_dma_address(sg_tx_itr) = buf_itr->tag.buf;
		sg_dma_len(sg_tx_itr) = buf_itr->tag.len;
		++sg_tx_itr;

		/* read off tag + len bytes (don't care) in input FIFO
		 * on read transfer
		 */
		if (buf_itr->is_rx) {
			/* rid of input tag */
			sg_dma_address(sg_rx_itr) =
					ctrl->xfer.dma.input_tag.phy_addr;
			sg_dma_len(sg_rx_itr) = QUP_BUF_OVERHD_BC;
			++sg_rx_itr;

			/* queue data buffer */
			sg_dma_address(sg_rx_itr) = buf_itr->ptr.phy_addr;
			sg_dma_len(sg_rx_itr) = buf_itr->len;
			++sg_rx_itr;
		} else {
			sg_dma_address(sg_tx_itr) = buf_itr->ptr.phy_addr;
			sg_dma_len(sg_tx_itr) = buf_itr->len;
			++sg_tx_itr;
		}
	}

	/* this tag will be copied to rx fifo */
	sg_dma_address(sg_tx_itr) = dma->eot_n_flush_stop_tags.phy_addr;
	sg_dma_len(sg_tx_itr) = QUP_BUF_OVERHD_BC;
	++sg_tx_itr;

	/*
	 * Reading the tag off the input fifo has side effects and
	 * it is mandatory for getting the DMA's interrupt.
	 */
	sg_dma_address(sg_rx_itr) = ctrl->xfer.dma.input_tag.phy_addr;
	sg_dma_len(sg_rx_itr) = QUP_BUF_OVERHD_BC;
	++sg_rx_itr;

	/*
	 * We only want a single BAM interrupt per transfer, and we always
	 * add a flush-stop i2c tag as the last tx sg entry. Since the dma
	 * driver puts the supplied BAM flags only on the last BAM descriptor,
	 * the flush stop will always be the one which generate that interrupt
	 * and invokes the callback.
	 */
	dma_desc_tx = dmaengine_prep_slave_sg(tx->dma_chan,
					sg_tx,
					sg_tx_itr - sg_tx,
					tx->dir,
					(SPS_IOVEC_FLAG_EOT |
						SPS_IOVEC_FLAG_NWD));
	if (IS_ERR_OR_NULL(dma_desc_tx)) {
		dev_err(ctrl->dev, "error dmaengine_prep_slave_sg tx:%ld\n",
							PTR_ERR(dma_desc_tx));
		ret = dma_desc_tx ? PTR_ERR(dma_desc_tx) : -ENOMEM;
		goto dma_xfer_end;
	}

	/* callback defined for tx dma desc */
	dma_desc_tx->callback = i2c_msm_dma_callback_tx_complete;
	dma_desc_tx->callback_param = ctrl;
	dmaengine_submit(dma_desc_tx);
	dma_async_issue_pending(tx->dma_chan);

	/* queue the rx dma desc */
	dma_desc_rx = dmaengine_prep_slave_sg(rx->dma_chan, sg_rx,
					sg_rx_itr - sg_rx, rx->dir,
					(SPS_IOVEC_FLAG_EOT |
						SPS_IOVEC_FLAG_NWD));
	if (IS_ERR_OR_NULL(dma_desc_rx)) {
		dev_err(ctrl->dev,
			"error dmaengine_prep_slave_sg rx:%ld\n",
							PTR_ERR(dma_desc_rx));
		ret = dma_desc_rx ? PTR_ERR(dma_desc_rx) : -ENOMEM;
		goto dma_xfer_end;
	}

	dma_desc_rx->callback = i2c_msm_dma_callback_rx_complete;
	dma_desc_rx->callback_param = ctrl;
	dmaengine_submit(dma_desc_rx);
	dma_async_issue_pending(rx->dma_chan);

	/* Set the QUP State to Run when completes the txn */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret) {
		dev_err(ctrl->dev, "transition to run state failed before DMA transaction :%d\n",
									ret);
		goto dma_xfer_end;
	}

	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
	if (!ret && ctrl->xfer.rx_cnt)
		ret = i2c_msm_xfer_wait_for_completion(ctrl,
						&ctrl->xfer.rx_complete);

dma_xfer_end:
	/* free scatter-gather lists */
	kfree(sg_tx);
	kfree(sg_rx);

	return ret;
}

static void i2c_msm_dma_free_channels(struct i2c_msm_ctrl *ctrl)
{
	int i;

	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];

		if (!chan->is_init)
			continue;

		dma_release_channel(chan->dma_chan);
		chan->is_init = false;
		chan->dma_chan = NULL;
	}
	if (ctrl->xfer.dma.state > I2C_MSM_DMA_INIT_CORE)
		ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CORE;
}

static const char * const i2c_msm_dma_chan_name[] = {"tx", "rx"};

static int i2c_msm_dmaengine_dir[] = {
	DMA_MEM_TO_DEV, DMA_DEV_TO_MEM
};

static int i2c_msm_dma_init_channels(struct i2c_msm_ctrl *ctrl)
{
	int ret = 0;
	int i;

	/* Iterate over the dma channels to initialize them */
	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
		struct dma_slave_config cfg = {0};
		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];

		if (chan->is_init)
			continue;

		chan->name = i2c_msm_dma_chan_name[i];
		chan->dma_chan = dma_request_slave_channel(ctrl->dev,
								chan->name);
		if (!chan->dma_chan) {
			dev_err(ctrl->dev,
				"error dma_request_slave_channel(dev:%s chan:%s)\n",
				dev_name(ctrl->dev), chan->name);
			/* free the channels if allocated before */
			i2c_msm_dma_free_channels(ctrl);
			return -ENODEV;
		}

		chan->dir = cfg.direction = i2c_msm_dmaengine_dir[i];
		ret = dmaengine_slave_config(chan->dma_chan, &cfg);
		if (ret) {
			dev_err(ctrl->dev,
				"error:%d dmaengine_slave_config(chan:%s)\n",
				ret, chan->name);
			dma_release_channel(chan->dma_chan);
			chan->dma_chan = NULL;
			i2c_msm_dma_free_channels(ctrl);
			return ret;
		}
		chan->is_init = true;
	}
	ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CHAN;
	return 0;
}

static void i2c_msm_dma_teardown(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;

	i2c_msm_dma_free_channels(ctrl);

	if (dma->state > I2C_MSM_DMA_INIT_NONE)
		dma_free_coherent(ctrl->dev, I2C_MSM_DMA_TAG_MEM_SZ,
					dma->input_tag.vrtl_addr,
					dma->input_tag.phy_addr);

	dma->state = I2C_MSM_DMA_INIT_NONE;
}

static int i2c_msm_dma_init(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	u8 *tags_space_virt_addr;
	dma_addr_t tags_space_phy_addr;

	/* check if DMA core is initialized */
	if (dma->state > I2C_MSM_DMA_INIT_NONE)
		goto dma_core_is_init;

	/*
	 * allocate dma memory for input_tag + eot_n_flush_stop_tags + tag_arr
	 * for more see: I2C_MSM_DMA_TAG_MEM_SZ definition
	 */
	tags_space_virt_addr = dma_alloc_coherent(
						ctrl->dev,
						I2C_MSM_DMA_TAG_MEM_SZ,
						&tags_space_phy_addr,
						GFP_KERNEL);
	if (!tags_space_virt_addr) {
		dev_err(ctrl->dev,
		  "error alloc %d bytes of DMAable memory for DMA tags space\n",
		  I2C_MSM_DMA_TAG_MEM_SZ);
		return -ENOMEM;
	}

	/*
	 * set the dma-tags virtual and physical addresses:
	 * 1) the first tag space is for the input (throw away) tag
	 */
	dma->input_tag.vrtl_addr = tags_space_virt_addr;
	dma->input_tag.phy_addr = tags_space_phy_addr;

	/* 2) second tag space is for eot_flush_stop tag which is const value */
	tags_space_virt_addr += I2C_MSM_TAG2_MAX_LEN;
	tags_space_phy_addr += I2C_MSM_TAG2_MAX_LEN;
	dma->eot_n_flush_stop_tags.vrtl_addr = tags_space_virt_addr;
	dma->eot_n_flush_stop_tags.phy_addr = tags_space_phy_addr;

	/* set eot_n_flush_stop_tags value */
	*((u16 *) dma->eot_n_flush_stop_tags.vrtl_addr) =
				QUP_TAG2_INPUT_EOT | (QUP_TAG2_FLUSH_STOP << 8);

	/* 3) all other tag spaces are used for transfer tags */
	tags_space_virt_addr += I2C_MSM_TAG2_MAX_LEN;
	tags_space_phy_addr += I2C_MSM_TAG2_MAX_LEN;
	dma->tag_arr.vrtl_addr = tags_space_virt_addr;
	dma->tag_arr.phy_addr = tags_space_phy_addr;

	dma->state = I2C_MSM_DMA_INIT_CORE;

dma_core_is_init:
	return i2c_msm_dma_init_channels(ctrl);
}

static int i2c_msm_dma_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	ret = i2c_msm_dma_init(ctrl);
	if (ret) {
		dev_err(ctrl->dev, "DMA Init Failed: %d\n", ret);
		return ret;
	}

	/* dma map user's buffers and create tags */
	ret = i2c_msm_dma_xfer_prepare(ctrl);
	if (ret < 0) {
		dev_err(ctrl->dev, "error on i2c_msm_dma_xfer_prepare():%d\n",
									ret);
		goto err_dma_xfer;
	}

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		goto err_dma_xfer;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		goto err_dma_xfer;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	/* enqueue transfer buffers */
	ret = i2c_msm_dma_xfer_process(ctrl);
	if (ret)
		dev_err(ctrl->dev,
			"error i2c_msm_dma_xfer_process(n_bufs:%zu):%d\n",
			ctrl->xfer.dma.buf_arr_cnt, ret);

err_dma_xfer:
	i2c_msm_dma_xfer_unprepare(ctrl);
	return ret;
}

/*
 * i2c_msm_qup_slv_holds_bus: true when the slave holds SDA low
 */
static bool i2c_msm_qup_slv_holds_bus(struct i2c_msm_ctrl *ctrl)
{
	u32 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);

	bool slv_holds_bus = !(status & QUP_I2C_SDA) &&
				(status & QUP_BUS_ACTIVE) &&
				!(status & QUP_BUS_MASTER);
	if (slv_holds_bus)
		dev_info(ctrl->dev,
			"bus lines held low by a slave detected\n");

	return slv_holds_bus;
}

/*
 * i2c_msm_qup_poll_bus_active_unset: poll until QUP_BUS_ACTIVE is unset
 *
 * @return zero when bus inactive, or nonzero on timeout.
 *
 * Loop and reads QUP_I2C_MASTER_STATUS until bus is inactive or timeout
 * reached. Used to avoid race condition due to gap between QUP completion
 * interrupt and QUP issuing stop signal on the bus.
 */
static int i2c_msm_qup_poll_bus_active_unset(struct i2c_msm_ctrl *ctrl)
{
	void __iomem *base = ctrl->rsrcs.base;
	ulong timeout = jiffies + msecs_to_jiffies(I2C_MSM_MAX_POLL_MSEC);
	int ret = 0;
	size_t read_cnt = 0;

	do {
		if (!(readl_relaxed(base + QUP_I2C_STATUS) & QUP_BUS_ACTIVE))
			goto poll_active_end;
		++read_cnt;
	} while (time_before_eq(jiffies, timeout));

	ret = -EBUSY;

poll_active_end:
	/* second logged value is time-left before timeout or zero if expired */
	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_ACTV_END,
				ret, (ret ? 0 : (timeout - jiffies)), read_cnt);

	return ret;
}

static void i2c_msm_clk_path_vote(struct i2c_msm_ctrl *ctrl)
{
	i2c_msm_clk_path_init(ctrl);

	if (ctrl->rsrcs.clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
					ctrl->rsrcs.clk_path_vote.client_hdl,
					I2C_MSM_CLK_PATH_RESUME_VEC);
}

static void i2c_msm_clk_path_unvote(struct i2c_msm_ctrl *ctrl)
{
	if (ctrl->rsrcs.clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
					ctrl->rsrcs.clk_path_vote.client_hdl,
					I2C_MSM_CLK_PATH_SUSPEND_VEC);
}

static void i2c_msm_clk_path_teardown(struct i2c_msm_ctrl *ctrl)
{
	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
		msm_bus_scale_unregister_client(
					ctrl->rsrcs.clk_path_vote.client_hdl);
		ctrl->rsrcs.clk_path_vote.client_hdl = 0;
	}
}

/*
 * i2c_msm_clk_path_init_structs: internal impl detail of i2c_msm_clk_path_init
 *
 * allocates and initializes the bus scaling vectors.
 */
static int i2c_msm_clk_path_init_structs(struct i2c_msm_ctrl *ctrl)
{
	struct msm_bus_vectors *paths = NULL;
	struct msm_bus_paths *usecases = NULL;

	i2c_msm_dbg(ctrl, MSM_PROF, "initializes path clock voting structs");

	paths = devm_kzalloc(ctrl->dev, sizeof(*paths) * 2, GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	usecases = devm_kzalloc(ctrl->dev, sizeof(*usecases) * 2, GFP_KERNEL);
	if (!usecases)
		goto path_init_err;

	ctrl->rsrcs.clk_path_vote.pdata = devm_kzalloc(ctrl->dev,
				sizeof(*ctrl->rsrcs.clk_path_vote.pdata),
				GFP_KERNEL);
	if (!ctrl->rsrcs.clk_path_vote.pdata) {
		dev_err(ctrl->dev,
			"error msm_bus_scale_pdata memory allocation failed\n");
		goto path_init_err;
	}

	paths[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	};

	paths[I2C_MSM_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) {
		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = I2C_MSM_CLK_PATH_AVRG_BW(ctrl),
		.ib = I2C_MSM_CLK_PATH_BRST_BW(ctrl),
	};

	usecases[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors = &paths[I2C_MSM_CLK_PATH_SUSPEND_VEC],
	};

	usecases[I2C_MSM_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors = &paths[I2C_MSM_CLK_PATH_RESUME_VEC],
	};

	*ctrl->rsrcs.clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
		.usecase = usecases,
		.num_usecases = 2,
		.name = dev_name(ctrl->dev),
	};

	return 0;

path_init_err:
	devm_kfree(ctrl->dev, paths);
	devm_kfree(ctrl->dev, usecases);
	devm_kfree(ctrl->dev, ctrl->rsrcs.clk_path_vote.pdata);
	ctrl->rsrcs.clk_path_vote.pdata = NULL;
	return -ENOMEM;
}

/*
 * i2c_msm_clk_path_postponed_register: reg with bus-scaling after it is probed
 *
 * @return zero on success
 *
 * Workaround: i2c driver may be probed before the bus scaling driver. Calling
 * msm_bus_scale_register_client() will fail if the bus scaling driver is not
 * ready yet. Thus, this function should be called not from probe but from a
 * later context. Also, this function may be called more than once before
 * registration succeeds. In that case only one error message will be logged. At boot
 * time all clocks are on, so earlier i2c transactions should succeed.
 */
static int i2c_msm_clk_path_postponed_register(struct i2c_msm_ctrl *ctrl)
{
	ctrl->rsrcs.clk_path_vote.client_hdl =
		msm_bus_scale_register_client(ctrl->rsrcs.clk_path_vote.pdata);

	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
		if (ctrl->rsrcs.clk_path_vote.reg_err) {
			/* log a success message if an error msg was logged */
			ctrl->rsrcs.clk_path_vote.reg_err = false;
			dev_err(ctrl->dev,
				"msm_bus_scale_register_client(mstr-id:%d):0x%x (ok)",
				ctrl->rsrcs.clk_path_vote.mstr_id,
				ctrl->rsrcs.clk_path_vote.client_hdl);
		}
	} else {
		/* guard to log only one error on multiple failure */
		if (!ctrl->rsrcs.clk_path_vote.reg_err) {
			ctrl->rsrcs.clk_path_vote.reg_err = true;

			dev_info(ctrl->dev,
				"msm_bus_scale_register_client(mstr-id:%d):0 (not a problem)",
				ctrl->rsrcs.clk_path_vote.mstr_id);
		}
	}

	return ctrl->rsrcs.clk_path_vote.client_hdl ? 0 : -EAGAIN;
}

static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl)
{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
	if (!ctrl->rsrcs.clk_path_vote.mstr_id ||
				ctrl->rsrcs.clk_path_vote.client_hdl)
		return;

	/* if fail once then try no more */
	if (!ctrl->rsrcs.clk_path_vote.pdata &&
					i2c_msm_clk_path_init_structs(ctrl)) {
		ctrl->rsrcs.clk_path_vote.mstr_id = 0;
		return;
	}

	/* on failure try again later */
	if (i2c_msm_clk_path_postponed_register(ctrl))
		return;
}

/*
 * i2c_msm_qup_isr: QUP interrupt service routine
 */
static irqreturn_t i2c_msm_qup_isr(int irq, void *devid)
{
	struct i2c_msm_ctrl *ctrl = devid;
	void __iomem *base = ctrl->rsrcs.base;
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	u32 err_flags = 0;
	u32 clr_flds = 0;
	bool log_event = false;
	bool signal_complete = false;
	bool need_wmb = false;

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_IRQ_BGN, irq, 0, 0);

	if (!atomic_read(&ctrl->xfer.is_active)) {
		dev_info(ctrl->dev, "irq:%d when no active transfer\n", irq);
		return IRQ_HANDLED;
	}

	ctrl->i2c_sts_reg = readl_relaxed(base + QUP_I2C_STATUS);
	err_flags = readl_relaxed(base + QUP_ERROR_FLAGS);
	ctrl->qup_op_reg = readl_relaxed(base + QUP_OPERATIONAL);

	if (ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK) {
		signal_complete = true;
		log_event = true;
		/*
		 * If there is more than 1 error here, last one sticks.
		 * The order of the error set here matters.
		 */
		if (ctrl->i2c_sts_reg & QUP_ARB_LOST)
			ctrl->xfer.err = I2C_MSM_ERR_ARB_LOST;

		if (ctrl->i2c_sts_reg & QUP_BUS_ERROR)
			ctrl->xfer.err = I2C_MSM_ERR_BUS_ERR;

		if (ctrl->i2c_sts_reg & QUP_PACKET_NACKED)
			ctrl->xfer.err = I2C_MSM_ERR_NACK;
	}

	/* check for FIFO over/under runs error */
	if (err_flags & QUP_ERR_FLGS_MASK)
		ctrl->xfer.err = I2C_MSM_ERR_OVR_UNDR_RUN;

	/* Dump the register values before reset the core */
	if (ctrl->xfer.err && ctrl->dbgfs.dbg_lvl >= MSM_DBG)
		i2c_msm_dbg_qup_reg_dump(ctrl);

	/* clear interrupts fields */
	clr_flds = ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK;
	if (clr_flds) {
		writel_relaxed(clr_flds, base + QUP_I2C_STATUS);
		need_wmb = true;
	}

	clr_flds = err_flags & QUP_ERR_FLGS_MASK;
	if (clr_flds) {
		writel_relaxed(clr_flds, base + QUP_ERROR_FLAGS);
		need_wmb = true;
	}

	clr_flds = ctrl->qup_op_reg &
			(QUP_OUTPUT_SERVICE_FLAG |
			QUP_INPUT_SERVICE_FLAG);
	if (clr_flds) {
		writel_relaxed(clr_flds, base + QUP_OPERATIONAL);
		need_wmb = true;
	}

	if (need_wmb)
		/*
		 * flush writes that clear the interrupt flags before changing
		 * state to reset.
		 */
		wmb();

	/* Reset and bail out on error */
	if (ctrl->xfer.err) {
		/* Flush for the tags in case of an error and DMA Mode */
		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA) {
			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
								+ QUP_STATE);
			/*
			 * Ensure that QUP_I2C_FLUSH is written before
			 * State reset
			 */
			wmb();
		}

		/* HW workaround: when the interrupt is level triggered, more
		 * than one interrupt may fire in error cases. Thus we
		 * change the QUP core state to Reset immediately in the
		 * ISR to ward off the next interrupt.
		 */
		writel_relaxed(QUP_STATE_RESET, ctrl->rsrcs.base + QUP_STATE);

		signal_complete = true;
		log_event = true;
		goto isr_end;
	}

	/* handle data completion */
	if (xfer->mode_id == I2C_MSM_XFER_MODE_BLOCK) {
		/* block ready for writing */
		if (ctrl->qup_op_reg & QUP_OUTPUT_SERVICE_FLAG) {
			log_event = true;
			if (ctrl->qup_op_reg & QUP_OUT_BLOCK_WRITE_REQ)
				complete(&blk->wait_tx_blk);

			if ((ctrl->qup_op_reg & blk->complete_mask)
					== blk->complete_mask) {
				log_event = true;
				signal_complete = true;
			}
		}
		/* block ready for reading */
		if (ctrl->qup_op_reg & QUP_INPUT_SERVICE_FLAG) {
			log_event = true;
			complete(&blk->wait_rx_blk);
		}
	} else {
		/* for FIFO/DMA Mode */
		if (ctrl->qup_op_reg & QUP_MAX_INPUT_DONE_FLAG) {
			log_event = true;
			/*
			 * If last transaction is an input then the entire
			 * transfer is done
			 */
			if (ctrl->xfer.last_is_rx)
				signal_complete = true;
		}
		/*
		 * Ideally, would like to check QUP_MAX_OUTPUT_DONE_FLAG.
		 * However, QUP_MAX_OUTPUT_DONE_FLAG is lagging behind
		 * QUP_OUTPUT_SERVICE_FLAG. The only reason for
		 * QUP_OUTPUT_SERVICE_FLAG to be set in FIFO mode is
		 * the QUP_MAX_OUTPUT_DONE_FLAG condition. The code here
		 * therefore checks QUP_OUTPUT_SERVICE_FLAG and assumes that
		 * QUP_MAX_OUTPUT_DONE_FLAG holds as well.
		 */
		if (ctrl->qup_op_reg & (QUP_OUTPUT_SERVICE_FLAG |
						QUP_MAX_OUTPUT_DONE_FLAG)) {
			log_event = true;
			/*
			 * If last transaction is an output then the
			 * entire transfer is done
			 */
			if (!ctrl->xfer.last_is_rx)
				signal_complete = true;
		}
	}

isr_end:
	if (log_event || (ctrl->dbgfs.dbg_lvl >= MSM_DBG))
		i2c_msm_prof_evnt_add(ctrl, MSM_PROF,
					I2C_MSM_IRQ_END,
					ctrl->i2c_sts_reg, ctrl->qup_op_reg,
					err_flags);

	if (signal_complete)
		complete(&ctrl->xfer.complete);

	return IRQ_HANDLED;
}

static void i2x_msm_blk_free_cache(struct i2c_msm_ctrl *ctrl)
{
	kfree(ctrl->xfer.blk.tx_cache);
	kfree(ctrl->xfer.blk.rx_cache);
}

static void i2c_msm_qup_init(struct i2c_msm_ctrl *ctrl)
{
	u32 state;
	void __iomem *base = ctrl->rsrcs.base;

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_PROF_RESET, 0, 0, 0);

	i2c_msm_qup_sw_reset(ctrl);
	i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);

1899 writel_relaxed(QUP_N_VAL | QUP_MINI_CORE_I2C_VAL, base + QUP_CONFIG);
1900
1901 writel_relaxed(QUP_OUTPUT_OVER_RUN_ERR_EN | QUP_INPUT_UNDER_RUN_ERR_EN
1902 | QUP_OUTPUT_UNDER_RUN_ERR_EN | QUP_INPUT_OVER_RUN_ERR_EN,
1903 base + QUP_ERROR_FLAGS_EN);
1904
1905 writel_relaxed(QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK,
1906 base + QUP_OPERATIONAL_MASK);
1907
1908 writel_relaxed(QUP_EN_VERSION_TWO_TAG, base + QUP_I2C_MASTER_CONFIG);
1909
1910 i2c_msm_qup_fifo_calc_size(ctrl);
1911 /*
1912	 * Ensure that the QUP configuration is written and the FIFO size is read
1913 * before leaving this function
1914 */
1915 mb();
1916
1917 state = readl_relaxed(base + QUP_STATE);
1918
1919 if (!(state & QUP_I2C_MAST_GEN))
1920 dev_err(ctrl->dev,
1921 "error on verifying HW support (I2C_MAST_GEN=0)\n");
1922}
1923
1924/*
1925 * qup_i2c_try_recover_bus_busy: issue QUP bus clear command
1926 */
1927static int qup_i2c_try_recover_bus_busy(struct i2c_msm_ctrl *ctrl)
1928{
1929 int ret;
1930 ulong min_sleep_usec;
1931
1932 /* call i2c_msm_qup_init() to set core in idle state */
1933 i2c_msm_qup_init(ctrl);
1934
1935 /* must be in run state for bus clear */
1936 ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
1937 if (ret < 0) {
1938 dev_err(ctrl->dev, "error: bus clear fail to set run state\n");
1939 return ret;
1940 }
1941
1942 /*
1943 * call i2c_msm_qup_xfer_init_run_state() to set clock dividers.
1944 * the dividers are necessary for bus clear.
1945 */
1946 i2c_msm_qup_xfer_init_run_state(ctrl);
1947
1948 writel_relaxed(0x1, ctrl->rsrcs.base + QUP_I2C_MASTER_BUS_CLR);
1949
1950 /*
1951 * wait for recovery (9 clock pulse cycles) to complete.
1952	 * min_time = 9 clock cycles * 10 (1000% margin)
1953	 * max_time = 10 * min_time
1954 */
1955 min_sleep_usec =
1956 max_t(ulong, (9 * 10 * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out, 100);
1957
1958 usleep_range(min_sleep_usec, min_sleep_usec * 10);
1959 return ret;
1960}
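
/*
 * Worked example (illustrative bus frequency): with clk_freq_out set to
 * 400000 (400 kHz fast mode),
 * min_sleep_usec = max((9 * 10 * 1000000) / 400000, 100) = 225, so the
 * function sleeps 225..2250 usec while the core drives the nine
 * recovery clock pulses.
 */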
1961
1962static int qup_i2c_recover_bus_busy(struct i2c_msm_ctrl *ctrl)
1963{
1964 u32 bus_clr, bus_active, status;
1965 int retry = 0;
1966
1967 dev_info(ctrl->dev, "Executing bus recovery procedure (9 clk pulse)\n");
1968
1969 do {
1970 qup_i2c_try_recover_bus_busy(ctrl);
1971 bus_clr = readl_relaxed(ctrl->rsrcs.base +
1972 QUP_I2C_MASTER_BUS_CLR);
1973 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
1974 bus_active = status & I2C_STATUS_BUS_ACTIVE;
1975 if (++retry >= I2C_QUP_MAX_BUS_RECOVERY_RETRY)
1976 break;
1977 } while (bus_clr || bus_active);
1978
1979 dev_info(ctrl->dev, "Bus recovery %s after %d retries\n",
1980 (bus_clr || bus_active) ? "fail" : "success", retry);
1981 return 0;
1982}
1983
1984static int i2c_msm_qup_post_xfer(struct i2c_msm_ctrl *ctrl, int err)
1985{
1986 /* poll until bus is released */
1987 if (i2c_msm_qup_poll_bus_active_unset(ctrl)) {
1988 if ((ctrl->xfer.err == I2C_MSM_ERR_ARB_LOST) ||
1989 (ctrl->xfer.err == I2C_MSM_ERR_BUS_ERR) ||
1990 (ctrl->xfer.err == I2C_MSM_ERR_TIMEOUT)) {
1991 if (i2c_msm_qup_slv_holds_bus(ctrl))
1992 qup_i2c_recover_bus_busy(ctrl);
1993
1994 /* do not generalize error to EIO if its already set */
1995 if (!err)
1996 err = -EIO;
1997 }
1998 }
1999
2000 /*
2001	 * Disable the IRQ before changing to the reset state to avoid
2002	 * spurious interrupts.
2003	 */
2005 disable_irq(ctrl->rsrcs.irq);
2006
2007	/* Flush DMA data and reset the QUP core on a timeout error.
2008	 * Other error cases are handled by the ISR.
2009	 */
2010 if (ctrl->xfer.err & I2C_MSM_ERR_TIMEOUT) {
2011 /* Flush for the DMA registers */
2012 if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
2013 writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
2014 + QUP_STATE);
2015
2016 /* reset the qup core */
2017 i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
2018 err = -ETIMEDOUT;
2019 } else if (ctrl->xfer.err == I2C_MSM_ERR_NACK) {
2020 err = -ENOTCONN;
2021 }
2022
2023 return err;
2024}
2025
2026static enum i2c_msm_xfer_mode_id
2027i2c_msm_qup_choose_mode(struct i2c_msm_ctrl *ctrl)
2028{
2029 struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
2030 struct i2c_msm_xfer *xfer = &ctrl->xfer;
2031 size_t rx_cnt_sum = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
2032 size_t tx_cnt_sum = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
2033
2035 if (ctrl->dbgfs.force_xfer_mode != I2C_MSM_XFER_MODE_NONE)
2036 return ctrl->dbgfs.force_xfer_mode;
2037
2038 if (((rx_cnt_sum < fifo->input_fifo_sz) &&
2039 (tx_cnt_sum < fifo->output_fifo_sz)))
2040 return I2C_MSM_XFER_MODE_FIFO;
2041
2042 if (ctrl->rsrcs.disable_dma)
2043 return I2C_MSM_XFER_MODE_BLOCK;
2044
2045 return I2C_MSM_XFER_MODE_DMA;
2046}
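
/*
 * Example of the mode choice (illustrative FIFO sizes): with 64-byte
 * input and output FIFOs, a 10-byte write plus its tag overhead fits
 * the output FIFO and runs in FIFO mode, while a 1 KB read overflows
 * the input FIFO and runs in DMA mode, or in BLOCK mode when DMA is
 * disabled in the device tree.
 */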
2047
2048/*
2049 * i2c_msm_xfer_calc_timeout: calc maximum xfer time in jiffies
2050 *
2051 * Basically timeout = (bit_count / frequency) * safety_coefficient.
2052 * The safety-coefficient also accounts for debugging delay (mostly from
2053 * printk() calls).
2054 */
2055static void i2c_msm_xfer_calc_timeout(struct i2c_msm_ctrl *ctrl)
2056{
2057 size_t byte_cnt = ctrl->xfer.rx_cnt + ctrl->xfer.tx_cnt;
2058 size_t bit_cnt = byte_cnt * 9;
2059 size_t bit_usec = (bit_cnt * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out;
2060 size_t loging_ovrhd_coef = ctrl->dbgfs.dbg_lvl + 1;
2061 size_t safety_coef = I2C_MSM_TIMEOUT_SAFETY_COEF * loging_ovrhd_coef;
2062 size_t xfer_max_usec = (bit_usec * safety_coef) +
2063 I2C_MSM_TIMEOUT_MIN_USEC;
2064
2065 ctrl->xfer.timeout = usecs_to_jiffies(xfer_max_usec);
2066}
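
/*
 * Worked example (assuming I2C_MSM_TIMEOUT_SAFETY_COEF == 10 and
 * loging_ovrhd_coef == 1): a 100-byte transfer at 400 kHz gives
 * bit_usec = (100 * 9 * 1000000) / 400000 = 2250, so
 * xfer_max_usec = 2250 * 10 + I2C_MSM_TIMEOUT_MIN_USEC before the
 * conversion to jiffies.
 */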
2067
2068static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
2069 struct completion *complete)
2070{
2071 struct i2c_msm_xfer *xfer = &ctrl->xfer;
2072 long time_left;
2073 int ret = 0;
2074
2075 time_left = wait_for_completion_timeout(complete,
2076 xfer->timeout);
2077 if (!time_left) {
2078 xfer->err = I2C_MSM_ERR_TIMEOUT;
2079 i2c_msm_dbg_dump_diag(ctrl, false, 0, 0);
2080 ret = -EIO;
2081 i2c_msm_prof_evnt_add(ctrl, MSM_ERR, I2C_MSM_COMPLT_FL,
2082 xfer->timeout, time_left, 0);
2083 } else {
2084 /* return an error if one detected by ISR */
2085 if (ctrl->xfer.err ||
2086 (ctrl->dbgfs.dbg_lvl >= MSM_DBG)) {
2087 i2c_msm_dbg_dump_diag(ctrl, true,
2088 ctrl->i2c_sts_reg, ctrl->qup_op_reg);
2089 ret = -(xfer->err);
2090 }
2091 i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_COMPLT_OK,
2092 xfer->timeout, time_left, 0);
2093 }
2094
2095 return ret;
2096}
2097
2098static u16 i2c_msm_slv_rd_wr_addr(u16 slv_addr, bool is_rx)
2099{
2100 return (slv_addr << 1) | (is_rx ? 0x1 : 0x0);
2101}
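
/*
 * Example: for a slave at 7-bit address 0x50, a read buffer yields
 * (0x50 << 1) | 1 = 0xa1 on the wire and a write buffer yields 0xa0.
 */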
2102
2103/*
2104 * @return true when the current transfer's buffer points to the last message
2105 * of the user's request.
2106 */
2107static bool i2c_msm_xfer_msg_is_last(struct i2c_msm_ctrl *ctrl)
2108{
2109 return ctrl->xfer.cur_buf.msg_idx >= (ctrl->xfer.msg_cnt - 1);
2110}
2111
2112/*
2113 * @return true when the current transfer's buffer points to the last
2114	 * transferable buffer (size <= QUP_MAX_BUF_SZ) of the last message of the
2115 * user's request.
2116 */
2117static bool i2c_msm_xfer_buf_is_last(struct i2c_msm_ctrl *ctrl)
2118{
2119 struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
2120 struct i2c_msg *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
2121
2122 return i2c_msm_xfer_msg_is_last(ctrl) &&
2123 ((cur_buf->byte_idx + QUP_MAX_BUF_SZ) >= cur_msg->len);
2124}
2125
2126static void i2c_msm_xfer_create_cur_tag(struct i2c_msm_ctrl *ctrl,
2127 bool start_req)
2128{
2129 struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
2130
2131 cur_buf->out_tag = i2c_msm_tag_create(start_req, cur_buf->is_last,
2132 cur_buf->is_rx, cur_buf->len,
2133 cur_buf->slv_addr);
2134
2135 cur_buf->in_tag.len = cur_buf->is_rx ? QUP_BUF_OVERHD_BC : 0;
2136}
2137
2138/*
2139 * i2c_msm_xfer_next_buf: support cases when msg.len > 256 bytes
2140 *
2141	 * @return true when a next buffer exists, false otherwise
2142 */
2143static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
2144{
2145 struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
2146 struct i2c_msg *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
2147	int bc_rem = 0;
2148
2149	if (!cur_msg)
2150 return false;
2151
2152 bc_rem = cur_msg->len - cur_buf->end_idx;
2153	if (cur_buf->is_init && cur_buf->end_idx && bc_rem) {
2154 /* not the first buffer in a message */
2155
2156 cur_buf->byte_idx = cur_buf->end_idx;
2157 cur_buf->is_last = i2c_msm_xfer_buf_is_last(ctrl);
2158 cur_buf->len = min_t(int, bc_rem, QUP_MAX_BUF_SZ);
2159 cur_buf->end_idx += cur_buf->len;
2160
2161		/* No start is required if it is not the first buffer in the msg */
2162 i2c_msm_xfer_create_cur_tag(ctrl, false);
2163 } else {
2164 /* first buffer in a new message */
2165 if (cur_buf->is_init) {
2166 if (i2c_msm_xfer_msg_is_last(ctrl))
2167 return false;
2168
2169 ++cur_buf->msg_idx;
2170 ++cur_msg;
2171		} else {
2172 cur_buf->is_init = true;
2173 }
2174 cur_buf->byte_idx = 0;
2175 cur_buf->is_last = i2c_msm_xfer_buf_is_last(ctrl);
2176 cur_buf->len = min_t(int, cur_msg->len, QUP_MAX_BUF_SZ);
2177 cur_buf->is_rx = (cur_msg->flags & I2C_M_RD);
2178 cur_buf->end_idx = cur_buf->len;
2179 cur_buf->slv_addr = i2c_msm_slv_rd_wr_addr(cur_msg->addr,
2180 cur_buf->is_rx);
2181 i2c_msm_xfer_create_cur_tag(ctrl, true);
2182 }
2183 i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_NEXT_BUF, cur_buf->msg_idx,
2184 cur_buf->byte_idx, 0);
2185 return true;
2186}
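
/*
 * Example: a request consisting of a single 600-byte write message
 * (QUP_MAX_BUF_SZ == 256, per the comment above) is served by three
 * calls to this function, yielding buffers of 256, 256 and 88 bytes;
 * only the first buffer carries a start tag and only the last one is
 * marked is_last.
 */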
2187
2188static void i2c_msm_pm_clk_unprepare(struct i2c_msm_ctrl *ctrl)
2189{
2190 clk_unprepare(ctrl->rsrcs.core_clk);
2191 clk_unprepare(ctrl->rsrcs.iface_clk);
2192}
2193
2194static int i2c_msm_pm_clk_prepare(struct i2c_msm_ctrl *ctrl)
2195{
2196 int ret;
2197
2198 ret = clk_prepare(ctrl->rsrcs.iface_clk);
2199
2200 if (ret) {
2201 dev_err(ctrl->dev,
2202 "error on clk_prepare(iface_clk):%d\n", ret);
2203 return ret;
2204 }
2205
2206 ret = clk_prepare(ctrl->rsrcs.core_clk);
2207 if (ret) {
2208 clk_unprepare(ctrl->rsrcs.iface_clk);
2209 dev_err(ctrl->dev,
2210 "error clk_prepare(core_clk):%d\n", ret);
2211 }
2212 return ret;
2213}
2214
2215static void i2c_msm_pm_clk_disable(struct i2c_msm_ctrl *ctrl)
2216{
2217 clk_disable(ctrl->rsrcs.core_clk);
2218 clk_disable(ctrl->rsrcs.iface_clk);
2219}
2220
2221static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
2222{
2223 int ret;
2224
2225 ret = clk_enable(ctrl->rsrcs.iface_clk);
2226 if (ret) {
2227 dev_err(ctrl->dev,
2228 "error on clk_enable(iface_clk):%d\n", ret);
2229 i2c_msm_pm_clk_unprepare(ctrl);
2230 return ret;
2231 }
2232 ret = clk_enable(ctrl->rsrcs.core_clk);
2233 if (ret) {
2234 clk_disable(ctrl->rsrcs.iface_clk);
2235 i2c_msm_pm_clk_unprepare(ctrl);
2236 dev_err(ctrl->dev,
2237 "error clk_enable(core_clk):%d\n", ret);
2238 }
2239 return ret;
2240}
2241
2242static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
2243{
2244 int ret;
2245
2246 mutex_lock(&ctrl->xfer.mtx);
2247
2248 i2c_msm_pm_pinctrl_state(ctrl, true);
2249 pm_runtime_get_sync(ctrl->dev);
2250 /*
2251	 * if the runtime-PM callback was not invoked (when both runtime-PM
2252	 * and system-PM are in transition concurrently)
2253 */
2254 if (ctrl->pwr_state != I2C_MSM_PM_RT_ACTIVE) {
2255 dev_info(ctrl->dev, "Runtime PM-callback was not invoked.\n");
2256 i2c_msm_pm_resume(ctrl->dev);
2257 }
2258
2259 ret = i2c_msm_pm_clk_enable(ctrl);
2260 if (ret) {
2261 mutex_unlock(&ctrl->xfer.mtx);
2262 return ret;
2263 }
2264 i2c_msm_qup_init(ctrl);
2265
2266	/* Set xfer to active state (effectively enabling our ISR) */
2267 atomic_set(&ctrl->xfer.is_active, 1);
2268
2269 enable_irq(ctrl->rsrcs.irq);
2270 return 0;
2271}
2272
2273static void i2c_msm_pm_xfer_end(struct i2c_msm_ctrl *ctrl)
2274{
2276 atomic_set(&ctrl->xfer.is_active, 0);
2277
2278 /*
2279	 * DMA resources are freed because of the multi-EE use case:
2280	 * other EEs can potentially use the DMA resources within
2281	 * the same runtime PM vote.
2282 */
2283 if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
2284 i2c_msm_dma_free_channels(ctrl);
2285
2286 i2c_msm_pm_clk_disable(ctrl);
2287
2288 if (!pm_runtime_enabled(ctrl->dev))
2289 i2c_msm_pm_suspend(ctrl->dev);
2290
2291 pm_runtime_mark_last_busy(ctrl->dev);
2292 pm_runtime_put_autosuspend(ctrl->dev);
2293 i2c_msm_pm_pinctrl_state(ctrl, false);
2294 mutex_unlock(&ctrl->xfer.mtx);
2295}
2296
2297/*
2298 * i2c_msm_xfer_scan: initial input scan
2299 */
2300static void i2c_msm_xfer_scan(struct i2c_msm_ctrl *ctrl)
2301{
2302 struct i2c_msm_xfer *xfer = &ctrl->xfer;
2303 struct i2c_msm_xfer_buf *cur_buf = &xfer->cur_buf;
2304
2305 while (i2c_msm_xfer_next_buf(ctrl)) {
2306
2307 if (cur_buf->is_rx)
2308 xfer->rx_cnt += cur_buf->len;
2309 else
2310 xfer->tx_cnt += cur_buf->len;
2311
2312 xfer->rx_ovrhd_cnt += cur_buf->in_tag.len;
2313 xfer->tx_ovrhd_cnt += cur_buf->out_tag.len;
2314
2315 if (i2c_msm_xfer_msg_is_last(ctrl))
2316 xfer->last_is_rx = cur_buf->is_rx;
2317 }
2318 xfer->cur_buf = (struct i2c_msm_xfer_buf){0};
2319}
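
/*
 * Example: scanning a request of one 2-byte write followed by one
 * 4-byte read leaves tx_cnt == 2 and rx_cnt == 4, plus the per-buffer
 * tag bytes in tx_ovrhd_cnt/rx_ovrhd_cnt, and last_is_rx == true. These
 * sums feed i2c_msm_qup_choose_mode() and i2c_msm_xfer_calc_timeout().
 */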
2320
2321static int
2322i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
2323{
2324 int ret = 0;
2325 struct i2c_msm_ctrl *ctrl = i2c_get_adapdata(adap);
2326 struct i2c_msm_xfer *xfer = &ctrl->xfer;
2327
2328	if (IS_ERR_OR_NULL(msgs) || num < 1) {
2329		dev_err(ctrl->dev,
2330			"error: invalid msgs pointer or message count\n");
2331		return -EINVAL;
2332 }
2333
2334	/* if system is suspended just bail out */
2335 if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
2336 dev_err(ctrl->dev,
2337 "slave:0x%x is calling xfer when system is suspended\n",
2338 msgs->addr);
2339 return -EIO;
2340 }
2341
2342 ret = i2c_msm_pm_xfer_start(ctrl);
2343 if (ret)
2344 return ret;
2345
2346 /* init xfer */
2347 xfer->msgs = msgs;
2348 xfer->msg_cnt = num;
2349 xfer->mode_id = I2C_MSM_XFER_MODE_NONE;
2350 xfer->err = 0;
2351 xfer->rx_cnt = 0;
2352 xfer->tx_cnt = 0;
2353 xfer->rx_ovrhd_cnt = 0;
2354 xfer->tx_ovrhd_cnt = 0;
2355 atomic_set(&xfer->event_cnt, 0);
2356 init_completion(&xfer->complete);
2357 init_completion(&xfer->rx_complete);
2358
2359 xfer->cur_buf.is_init = false;
2360 xfer->cur_buf.msg_idx = 0;
2361
2362 i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_BEG, num,
2363 msgs->addr, 0);
2364
2365 i2c_msm_xfer_scan(ctrl);
2366 i2c_msm_xfer_calc_timeout(ctrl);
2367 xfer->mode_id = i2c_msm_qup_choose_mode(ctrl);
2368
2369	dev_dbg(ctrl->dev, "xfer() mode:%d msg_cnt:%d rx_cnt:%zu tx_cnt:%zu\n",
2370 xfer->mode_id, xfer->msg_cnt, xfer->rx_cnt, xfer->tx_cnt);
2371
2372 switch (xfer->mode_id) {
2373 case I2C_MSM_XFER_MODE_FIFO:
2374 ret = i2c_msm_fifo_xfer(ctrl);
2375 break;
2376 case I2C_MSM_XFER_MODE_BLOCK:
2377 ret = i2c_msm_blk_xfer(ctrl);
2378 break;
2379 case I2C_MSM_XFER_MODE_DMA:
2380 ret = i2c_msm_dma_xfer(ctrl);
2381 break;
2382 default:
2383 ret = -EINTR;
2384	}
2385
2386 i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_SCAN_SUM,
2387 ((xfer->rx_cnt & 0xff) | ((xfer->rx_ovrhd_cnt & 0xff) << 16)),
2388 ((xfer->tx_cnt & 0xff) | ((xfer->tx_ovrhd_cnt & 0xff) << 16)),
2389 ((ctrl->xfer.timeout & 0xfff) | ((xfer->mode_id & 0xf) << 24)));
2390
2391 ret = i2c_msm_qup_post_xfer(ctrl, ret);
2392	/* on success, return number of messages sent (which is index + 1) */
2393 if (!ret)
2394 ret = xfer->cur_buf.msg_idx + 1;
2395
2396 i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_END, ret, xfer->err,
2397 xfer->cur_buf.msg_idx + 1);
2398 /* process and dump profiling data */
2399 if (xfer->err || (ctrl->dbgfs.dbg_lvl >= MSM_PROF))
2400 i2c_msm_prof_evnt_dump(ctrl);
2401
2402 i2c_msm_pm_xfer_end(ctrl);
2403 return ret;
2404}
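
/*
 * Usage sketch from a client driver's perspective (illustrative slave
 * address, register and variable names): a one-byte register read
 * reaches the handler above as two messages via the standard i2c core
 * API:
 *
 *	u8 reg = 0x10, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *	ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 *
 * On success the handler returns the number of messages transferred
 * (here 2).
 */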
2405
2406enum i2c_msm_dt_entry_status {
2407 DT_REQ, /* Required: fail if missing */
2408 DT_SGST, /* Suggested: warn if missing */
2409 DT_OPT, /* Optional: don't warn if missing */
2410};
2411
2412enum i2c_msm_dt_entry_type {
2413 DT_U32,
2414 DT_BOOL,
2415 DT_ID, /* of_alias_get_id() */
2416};
2417
2418struct i2c_msm_dt_to_pdata_map {
2419 const char *dt_name;
2420 void *ptr_data;
2421 enum i2c_msm_dt_entry_status status;
2422 enum i2c_msm_dt_entry_type type;
2423 int default_val;
2424};
2425
2426static int i2c_msm_dt_to_pdata_populate(struct i2c_msm_ctrl *ctrl,
2427 struct platform_device *pdev,
2428 struct i2c_msm_dt_to_pdata_map *itr)
2429{
2430 int ret, err = 0;
2431 struct device_node *node = pdev->dev.of_node;
2432
2433 for (; itr->dt_name ; ++itr) {
2434 switch (itr->type) {
2435 case DT_U32:
2436 ret = of_property_read_u32(node, itr->dt_name,
2437 (u32 *) itr->ptr_data);
2438 break;
2439 case DT_BOOL:
2440 *((bool *) itr->ptr_data) =
2441 of_property_read_bool(node, itr->dt_name);
2442 ret = 0;
2443 break;
2444 case DT_ID:
2445 ret = of_alias_get_id(node, itr->dt_name);
2446 if (ret >= 0) {
2447 *((int *) itr->ptr_data) = ret;
2448 ret = 0;
2449 }
2450 break;
2451 default:
2452 dev_err(ctrl->dev,
2453				"error: unknown DT entry type %d\n",
2454 itr->type);
2455 ret = -EBADE;
2456 }
2457
2458 i2c_msm_dbg(ctrl, MSM_PROF, "DT entry ret:%d name:%s val:%d",
2459 ret, itr->dt_name, *((int *)itr->ptr_data));
2460
2461 if (ret) {
2462 *((int *)itr->ptr_data) = itr->default_val;
2463
2464 if (itr->status < DT_OPT) {
2465 dev_err(ctrl->dev,
2466 "error Missing '%s' DT entry\n",
2467 itr->dt_name);
2468
2469 /* cont on err to dump all missing entries */
2470 if (itr->status == DT_REQ && !err)
2471 err = ret;
2472 }
2473 }
2474 }
2475
2476 return err;
2477}
2478
2480/*
2481 * i2c_msm_rsrcs_process_dt: copy data from DT to platform data
2482 * @return zero on success or negative error code
2483 */
2484static int i2c_msm_rsrcs_process_dt(struct i2c_msm_ctrl *ctrl,
2485 struct platform_device *pdev)
2486{
2487 u32 fs_clk_div, ht_clk_div, noise_rjct_scl, noise_rjct_sda;
2488 int ret;
2489
2490 struct i2c_msm_dt_to_pdata_map map[] = {
2491 {"i2c", &pdev->id, DT_REQ, DT_ID, -1},
2492 {"qcom,clk-freq-out", &ctrl->rsrcs.clk_freq_out,
2493 DT_REQ, DT_U32, 0},
2494 {"qcom,clk-freq-in", &ctrl->rsrcs.clk_freq_in,
2495 DT_REQ, DT_U32, 0},
2496 {"qcom,disable-dma", &(ctrl->rsrcs.disable_dma),
2497 DT_OPT, DT_BOOL, 0},
2498 {"qcom,master-id", &(ctrl->rsrcs.clk_path_vote.mstr_id),
2499 DT_SGST, DT_U32, 0},
2500 {"qcom,noise-rjct-scl", &noise_rjct_scl,
2501 DT_OPT, DT_U32, 0},
2502 {"qcom,noise-rjct-sda", &noise_rjct_sda,
2503 DT_OPT, DT_U32, 0},
2504 {"qcom,high-time-clk-div", &ht_clk_div,
2505 DT_OPT, DT_U32, 0},
2506 {"qcom,fs-clk-div", &fs_clk_div,
2507 DT_OPT, DT_U32, 0},
2508 {NULL, NULL, 0, 0, 0},
2509 };
2510
2511 ret = i2c_msm_dt_to_pdata_populate(ctrl, pdev, map);
2512 if (ret)
2513 return ret;
2514
2515 /* set divider and noise reject values */
2516 return i2c_msm_set_mstr_clk_ctl(ctrl, fs_clk_div, ht_clk_div,
2517 noise_rjct_scl, noise_rjct_sda);
2518}
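
/*
 * Example device-tree node consumed by the map above (illustrative unit
 * address and values; an "i2c" alias pointing at the node supplies
 * pdev->id):
 *
 *	i2c_1: i2c@78b5000 {
 *		compatible = "qcom,i2c-msm-v2";
 *		qcom,clk-freq-out = <400000>;
 *		qcom,clk-freq-in = <19200000>;
 *		qcom,master-id = <86>;
 *		qcom,disable-dma;
 *	};
 */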
2519
2520/*
2521 * i2c_msm_rsrcs_mem_init: reads pdata request region and ioremap it
2522 * @return zero on success or negative error code
2523 */
2524static int i2c_msm_rsrcs_mem_init(struct platform_device *pdev,
2525 struct i2c_msm_ctrl *ctrl)
2526{
2527 struct resource *mem_region;
2528
2529 ctrl->rsrcs.mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2530 "qup_phys_addr");
2531 if (!ctrl->rsrcs.mem) {
2532 dev_err(ctrl->dev, "error Missing 'qup_phys_addr' resource\n");
2533 return -ENODEV;
2534 }
2535
2536 mem_region = request_mem_region(ctrl->rsrcs.mem->start,
2537 resource_size(ctrl->rsrcs.mem),
2538 pdev->name);
2539 if (!mem_region) {
2540 dev_err(ctrl->dev,
2541 "QUP physical memory region already claimed\n");
2542 return -EBUSY;
2543 }
2544
2545 ctrl->rsrcs.base = devm_ioremap(ctrl->dev, ctrl->rsrcs.mem->start,
2546 resource_size(ctrl->rsrcs.mem));
2547 if (!ctrl->rsrcs.base) {
2548 dev_err(ctrl->dev,
2549			"error: failed ioremap(base:0x%llx size:0x%llx)\n",
2550 (u64) ctrl->rsrcs.mem->start,
2551 (u64) resource_size(ctrl->rsrcs.mem));
2552 release_mem_region(ctrl->rsrcs.mem->start,
2553 resource_size(ctrl->rsrcs.mem));
2554 return -ENOMEM;
2555 }
2556
2557 return 0;
2558}
2559
2560static void i2c_msm_rsrcs_mem_teardown(struct i2c_msm_ctrl *ctrl)
2561{
2562 release_mem_region(ctrl->rsrcs.mem->start,
2563 resource_size(ctrl->rsrcs.mem));
2564}
2565
2566/*
2567 * i2c_msm_rsrcs_irq_init: finds irq num in pdata and requests it
2568 * @return zero on success or negative error code
2569 */
2570static int i2c_msm_rsrcs_irq_init(struct platform_device *pdev,
2571 struct i2c_msm_ctrl *ctrl)
2572{
2573 int ret, irq;
2574
2575 irq = platform_get_irq_byname(pdev, "qup_irq");
2576 if (irq < 0) {
2577 dev_err(ctrl->dev, "error reading irq resource\n");
2578 return irq;
2579 }
2580
2581 ret = request_irq(irq, i2c_msm_qup_isr, IRQF_TRIGGER_HIGH,
2582 "i2c-msm-v2-irq", ctrl);
2583 if (ret) {
2584 dev_err(ctrl->dev, "error request_irq(irq_num:%d ) ret:%d\n",
2585 irq, ret);
2586 return ret;
2587 }
2588
2589 disable_irq(irq);
2590 ctrl->rsrcs.irq = irq;
2591 return 0;
2592}
2593
2594static void i2c_msm_rsrcs_irq_teardown(struct i2c_msm_ctrl *ctrl)
2595{
2596 free_irq(ctrl->rsrcs.irq, ctrl);
2597}
2598
2600static struct pinctrl_state *
2601i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl, const char *name)
2602{
2603 struct pinctrl_state *pin_state
2604 = pinctrl_lookup_state(ctrl->rsrcs.pinctrl, name);
2605
2606 if (IS_ERR_OR_NULL(pin_state))
2607 dev_info(ctrl->dev, "note pinctrl_lookup_state(%s) err:%ld\n",
2608 name, PTR_ERR(pin_state));
2609 return pin_state;
2610}
2611
2612/*
2613 * i2c_msm_rsrcs_gpio_pinctrl_init: initializes the pinctrl for i2c gpios
2614 *
2615 * @pre platform data must be initialized
2616 */
2617static int i2c_msm_rsrcs_gpio_pinctrl_init(struct i2c_msm_ctrl *ctrl)
2618{
2619 ctrl->rsrcs.pinctrl = devm_pinctrl_get(ctrl->dev);
2620 if (IS_ERR_OR_NULL(ctrl->rsrcs.pinctrl)) {
2621 dev_err(ctrl->dev, "error devm_pinctrl_get() failed err:%ld\n",
2622 PTR_ERR(ctrl->rsrcs.pinctrl));
2623 return PTR_ERR(ctrl->rsrcs.pinctrl);
2624 }
2625
2626 ctrl->rsrcs.gpio_state_active =
2627 i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_ACTIVE);
2628
2629 ctrl->rsrcs.gpio_state_suspend =
2630 i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_SUSPEND);
2631
2632 return 0;
2633}
2634
2635static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
2636 bool runtime_active)
2637{
2638 struct pinctrl_state *pins_state;
2639 const char *pins_state_name;
2640
2641 if (runtime_active) {
2642 pins_state = ctrl->rsrcs.gpio_state_active;
2643 pins_state_name = I2C_MSM_PINCTRL_ACTIVE;
2644 } else {
2645 pins_state = ctrl->rsrcs.gpio_state_suspend;
2646 pins_state_name = I2C_MSM_PINCTRL_SUSPEND;
2647 }
2648
2649 if (!IS_ERR_OR_NULL(pins_state)) {
2650 int ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, pins_state);
2651
2652 if (ret)
2653 dev_err(ctrl->dev,
2654 "error pinctrl_select_state(%s) err:%d\n",
2655 pins_state_name, ret);
2656 } else {
2657 dev_err(ctrl->dev,
2658 "error pinctrl state-name:'%s' is not configured\n",
2659 pins_state_name);
2660 }
2661}
2662
2663/*
2664 * i2c_msm_rsrcs_clk_init: get clocks and set rate
2665 *
2666 * @return zero on success or negative error code
2667 */
2668static int i2c_msm_rsrcs_clk_init(struct i2c_msm_ctrl *ctrl)
2669{
2670 int ret = 0;
2671
2672 if ((ctrl->rsrcs.clk_freq_out <= 0) ||
2673 (ctrl->rsrcs.clk_freq_out > I2C_MSM_CLK_FAST_PLUS_FREQ)) {
2674 dev_err(ctrl->dev,
2675			"error: clock frequency %d kHz is not supported\n",
2676 (ctrl->rsrcs.clk_freq_out / 1000));
2677 return -EIO;
2678 }
2679
2680 ctrl->rsrcs.core_clk = clk_get(ctrl->dev, "core_clk");
2681 if (IS_ERR(ctrl->rsrcs.core_clk)) {
2682 ret = PTR_ERR(ctrl->rsrcs.core_clk);
2683 dev_err(ctrl->dev, "error on clk_get(core_clk):%d\n", ret);
2684 return ret;
2685 }
2686
2687 ret = clk_set_rate(ctrl->rsrcs.core_clk, ctrl->rsrcs.clk_freq_in);
2688 if (ret) {
2689		dev_err(ctrl->dev, "error on clk_set_rate(core_clk, %d kHz):%d\n",
2690 (ctrl->rsrcs.clk_freq_in / 1000), ret);
2691 goto err_set_rate;
2692 }
2693
2694 ctrl->rsrcs.iface_clk = clk_get(ctrl->dev, "iface_clk");
2695 if (IS_ERR(ctrl->rsrcs.iface_clk)) {
2696 ret = PTR_ERR(ctrl->rsrcs.iface_clk);
2697 dev_err(ctrl->dev, "error on clk_get(iface_clk):%d\n", ret);
2698 goto err_set_rate;
2699 }
2700
2701 return 0;
2702
2703err_set_rate:
2704 clk_put(ctrl->rsrcs.core_clk);
2705 ctrl->rsrcs.core_clk = NULL;
2706 return ret;
2707}
2708
2709static void i2c_msm_rsrcs_clk_teardown(struct i2c_msm_ctrl *ctrl)
2710{
2711 clk_put(ctrl->rsrcs.core_clk);
2712 clk_put(ctrl->rsrcs.iface_clk);
2713 i2c_msm_clk_path_teardown(ctrl);
2714}
2715
2718static void i2c_msm_pm_suspend(struct device *dev)
2719{
2720 struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
2721
2722 if (ctrl->pwr_state == I2C_MSM_PM_RT_SUSPENDED) {
2723 dev_err(ctrl->dev, "attempt to suspend when suspended\n");
2724 return;
2725 }
2726 i2c_msm_dbg(ctrl, MSM_DBG, "suspending...");
2727 i2c_msm_pm_clk_unprepare(ctrl);
2728 i2c_msm_clk_path_unvote(ctrl);
2729
2730 /*
2731	 * We implement system and runtime suspend in the same way. However,
2732	 * it is important to distinguish between them when servicing
2733	 * transfer requests. If we get a transfer request while in runtime
2734	 * suspend, we simply wake up and service the request. But if we get
2735	 * a transfer request while the system is suspending, we bail out on
2736	 * that request. This is why, once we have marked that we are in
2737	 * system suspend, we do not override that state with runtime suspend.
2738 */
2739 if (ctrl->pwr_state != I2C_MSM_PM_SYS_SUSPENDED)
2740 ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
2741}
2742
2743static int i2c_msm_pm_resume(struct device *dev)
2744{
2745 struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
2746
2747 if (ctrl->pwr_state == I2C_MSM_PM_RT_ACTIVE)
2748 return 0;
2749
2750 i2c_msm_dbg(ctrl, MSM_DBG, "resuming...");
2751
2752 i2c_msm_clk_path_vote(ctrl);
2753 i2c_msm_pm_clk_prepare(ctrl);
2754 ctrl->pwr_state = I2C_MSM_PM_RT_ACTIVE;
2755 return 0;
2756}
2757
2758#ifdef CONFIG_PM
2759/*
2760 * i2c_msm_pm_sys_suspend_noirq: system power management callback
2761 */
2762static int i2c_msm_pm_sys_suspend_noirq(struct device *dev)
2763{
2764 int ret = 0;
2765 struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
2766 enum i2c_msm_power_state prev_state = ctrl->pwr_state;
2767
2768 i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: suspending...");
2769
2770 /* Acquire mutex to ensure current transaction is over */
2771 mutex_lock(&ctrl->xfer.mtx);
2772 ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
2773 mutex_unlock(&ctrl->xfer.mtx);
2775
2776 if (prev_state == I2C_MSM_PM_RT_ACTIVE) {
2777 i2c_msm_pm_suspend(dev);
2778 /*
2779 * Synchronize runtime-pm and system-pm states:
2780 * at this point we are already suspended. However, the
2781 * runtime-PM framework still thinks that we are active.
2782 * The three calls below let the runtime-PM know that we are
2783 * suspended already without re-invoking the suspend callback
2784 */
2785 pm_runtime_disable(dev);
2786 pm_runtime_set_suspended(dev);
2787 pm_runtime_enable(dev);
2788 }
2789
2790 return ret;
2791}
2792
2793/*
2794 * i2c_msm_pm_sys_resume: system power management callback
2795 * shifts the controller's power state from system suspend to runtime suspend
2796 */
2797static int i2c_msm_pm_sys_resume_noirq(struct device *dev)
2798{
2799 struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
2800
2801 i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: resuming...");
2802 mutex_lock(&ctrl->xfer.mtx);
2803 ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
2804 mutex_unlock(&ctrl->xfer.mtx);
2805 return 0;
2806}
2807#endif
2808
2809#ifdef CONFIG_PM
2810static void i2c_msm_pm_rt_init(struct device *dev)
2811{
2812 pm_runtime_set_suspended(dev);
2813 pm_runtime_set_autosuspend_delay(dev, (MSEC_PER_SEC >> 2));
2814 pm_runtime_use_autosuspend(dev);
2815 pm_runtime_enable(dev);
2816}
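
/*
 * Note: MSEC_PER_SEC >> 2 == 250, so the controller autosuspends 250 ms
 * after its last transfer.
 */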
2817
2818/*
2819 * i2c_msm_pm_rt_suspend: runtime power management callback
2820 */
2821static int i2c_msm_pm_rt_suspend(struct device *dev)
2822{
2823 struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
2824
2825 i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: suspending...");
2826 i2c_msm_pm_suspend(dev);
2827 return 0;
2828}
2829
2830/*
2831 * i2c_msm_pm_rt_resume: runtime power management callback
2832 */
2833static int i2c_msm_pm_rt_resume(struct device *dev)
2834{
2835 struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
2836
2837 i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: resuming...");
2838 return i2c_msm_pm_resume(dev);
2839}
2840
2841#else
2842static void i2c_msm_pm_rt_init(struct device *dev) {}
2843#define i2c_msm_pm_rt_suspend NULL
2844#define i2c_msm_pm_rt_resume NULL
2845#endif
2846
2847static const struct dev_pm_ops i2c_msm_pm_ops = {
2848#ifdef CONFIG_PM_SLEEP
2849 .suspend_noirq = i2c_msm_pm_sys_suspend_noirq,
2850 .resume_noirq = i2c_msm_pm_sys_resume_noirq,
2851#endif
2852 SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend,
2853 i2c_msm_pm_rt_resume,
2854 NULL)
2855};
2856
2857static u32 i2c_msm_frmwrk_func(struct i2c_adapter *adap)
2858{
2859 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
2860}
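
/*
 * I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK advertises all SMBus
 * commands emulated over plain I2C transfers except SMBus Quick,
 * which would require a zero-length message that this controller does
 * not generate.
 */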
2861
2862static const struct i2c_algorithm i2c_msm_frmwrk_algrtm = {
2863 .master_xfer = i2c_msm_frmwrk_xfer,
2864 .functionality = i2c_msm_frmwrk_func,
2865};
2866
2867static const char * const i2c_msm_adapter_name = "MSM-I2C-v2-adapter";
2868
2869static int i2c_msm_frmwrk_reg(struct platform_device *pdev,
2870 struct i2c_msm_ctrl *ctrl)
2871{
2872 int ret;
2873
2874 i2c_set_adapdata(&ctrl->adapter, ctrl);
2875 ctrl->adapter.algo = &i2c_msm_frmwrk_algrtm;
2876 strlcpy(ctrl->adapter.name, i2c_msm_adapter_name,
2877 sizeof(ctrl->adapter.name));
2878
2879 ctrl->adapter.nr = pdev->id;
2880 ctrl->adapter.dev.parent = &pdev->dev;
2881 ctrl->adapter.dev.of_node = pdev->dev.of_node;
2882 ret = i2c_add_numbered_adapter(&ctrl->adapter);
2883 if (ret) {
2884		dev_err(ctrl->dev, "error i2c_add_numbered_adapter failed\n");
2885 return ret;
2886 }
2887
2888 return ret;
2889}
2890
2891static void i2c_msm_frmwrk_unreg(struct i2c_msm_ctrl *ctrl)
2892{
2893 i2c_del_adapter(&ctrl->adapter);
2894}
2895
2896static int i2c_msm_probe(struct platform_device *pdev)
2897{
2898 struct i2c_msm_ctrl *ctrl;
2899 int ret = 0;
2900
2901 dev_info(&pdev->dev, "probing driver i2c-msm-v2\n");
2902
2903 ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
2904 if (!ctrl)
2905 return -ENOMEM;
2906 ctrl->dev = &pdev->dev;
2907 platform_set_drvdata(pdev, ctrl);
2908 ctrl->dbgfs.dbg_lvl = DEFAULT_DBG_LVL;
2909 ctrl->dbgfs.force_xfer_mode = I2C_MSM_XFER_MODE_NONE;
2910 mutex_init(&ctrl->xfer.mtx);
2911 ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
2912
2913 if (!pdev->dev.of_node) {
2914		dev_err(&pdev->dev, "error: null device-tree node\n");
2915 return -EBADE;
2916 }
2917
2918 ret = i2c_msm_rsrcs_process_dt(ctrl, pdev);
2919 if (ret) {
2920		dev_err(ctrl->dev, "error processing device-tree node\n");
2921 return ret;
2922 }
2923
2924 ret = i2c_msm_rsrcs_mem_init(pdev, ctrl);
2925 if (ret)
2926 goto mem_err;
2927
2928 ret = i2c_msm_rsrcs_clk_init(ctrl);
2929 if (ret)
2930 goto clk_err;
2931
2932 /* vote for clock to enable reading the version number off the HW */
2933 i2c_msm_clk_path_vote(ctrl);
2934
2935 ret = i2c_msm_pm_clk_prepare(ctrl);
2936 if (ret)
2937 goto clk_err;
2938
2939 ret = i2c_msm_pm_clk_enable(ctrl);
2940 if (ret) {
2941 i2c_msm_pm_clk_unprepare(ctrl);
2942 goto clk_err;
2943 }
2944
2945 /*
2946 * reset the core before registering for interrupts. This solves an
2947 * interrupt storm issue when the bootloader leaves a pending interrupt.
2948 */
2949 ret = i2c_msm_qup_sw_reset(ctrl);
2950 if (ret)
2951		dev_err(ctrl->dev, "error on qup software reset\n");
2952
2953 i2c_msm_pm_clk_disable(ctrl);
2954 i2c_msm_pm_clk_unprepare(ctrl);
2955 i2c_msm_clk_path_unvote(ctrl);
2956
2957 ret = i2c_msm_rsrcs_gpio_pinctrl_init(ctrl);
2958 if (ret)
2959 goto err_no_pinctrl;
2960
2961 i2c_msm_pm_rt_init(ctrl->dev);
2962
2963 ret = i2c_msm_rsrcs_irq_init(pdev, ctrl);
2964 if (ret)
2965 goto irq_err;
2966
2967 i2c_msm_dbgfs_init(ctrl);
2968
2969 ret = i2c_msm_frmwrk_reg(pdev, ctrl);
2970 if (ret)
2971 goto reg_err;
2972
2973 i2c_msm_dbg(ctrl, MSM_PROF, "probe() completed with success");
2974 return 0;
2975
2976reg_err:
2977 i2c_msm_dbgfs_teardown(ctrl);
2978 i2c_msm_rsrcs_irq_teardown(ctrl);
2979irq_err:
2980 i2x_msm_blk_free_cache(ctrl);
2981err_no_pinctrl:
2982 i2c_msm_rsrcs_clk_teardown(ctrl);
2983clk_err:
2984 i2c_msm_rsrcs_mem_teardown(ctrl);
2985mem_err:
2986 dev_err(ctrl->dev, "error probe() failed with err:%d\n", ret);
2987 devm_kfree(&pdev->dev, ctrl);
2988 return ret;
2989}
2990
2991static int i2c_msm_remove(struct platform_device *pdev)
2992{
2993 struct i2c_msm_ctrl *ctrl = platform_get_drvdata(pdev);
2994
2995 /* Grab mutex to ensure ongoing transaction is over */
2996 mutex_lock(&ctrl->xfer.mtx);
2997 ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
2998 pm_runtime_disable(ctrl->dev);
2999 /* no one can call a xfer after the next line */
3000 i2c_msm_frmwrk_unreg(ctrl);
3001 mutex_unlock(&ctrl->xfer.mtx);
3002 mutex_destroy(&ctrl->xfer.mtx);
3003
3004 i2c_msm_dma_teardown(ctrl);
3005 i2c_msm_dbgfs_teardown(ctrl);
3006 i2c_msm_rsrcs_irq_teardown(ctrl);
3007 i2c_msm_rsrcs_clk_teardown(ctrl);
3008 i2c_msm_rsrcs_mem_teardown(ctrl);
3009 i2x_msm_blk_free_cache(ctrl);
3010 return 0;
3011}
3012
3013static const struct of_device_id i2c_msm_dt_match[] = {
3014 {
3015 .compatible = "qcom,i2c-msm-v2",
3016 },
3017 {}
3018};
3019
3020static struct platform_driver i2c_msm_driver = {
3021 .probe = i2c_msm_probe,
3022 .remove = i2c_msm_remove,
3023 .driver = {
3024 .name = "i2c-msm-v2",
3025 .owner = THIS_MODULE,
3026 .pm = &i2c_msm_pm_ops,
3027 .of_match_table = i2c_msm_dt_match,
3028 },
3029};
3030
3031static int i2c_msm_init(void)
3032{
3033 return platform_driver_register(&i2c_msm_driver);
3034}
3035subsys_initcall(i2c_msm_init);
3036
3037static void i2c_msm_exit(void)
3038{
3039 platform_driver_unregister(&i2c_msm_driver);
3040}
3041module_exit(i2c_msm_exit);
3042
3043MODULE_LICENSE("GPL v2");
3044MODULE_ALIAS("platform:i2c-msm-v2");