// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2019, 2020, The Linux Foundation. All rights reserved.
 */

/*
 * I2C controller driver for Qualcomm Technologies Inc platforms
 */

#define pr_fmt(fmt) "#%d " fmt "\n", __LINE__

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/msm-sps.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/i2c-msm-v2.h>

#ifdef DEBUG
static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_DBG;
#else
static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_ERR;
#endif

/* Forward declarations */
static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl);
static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
						struct completion *complete);
static int i2c_msm_pm_resume(struct device *dev);
static void i2c_msm_pm_suspend(struct device *dev);
static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
static struct pinctrl_state *
	i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl,
					const char *name);
static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
					bool runtime_active);

/* string table for enum i2c_msm_xfer_mode_id */
const char * const i2c_msm_mode_str_tbl[] = {
	"FIFO", "BLOCK", "DMA", "None",
};

static const u32 i2c_msm_fifo_block_sz_tbl[] = {16, 16, 32, 0};

/* from enum i2c_msm_xfer_mode_id to qup_io_modes register values */
static const u32 i2c_msm_mode_to_reg_tbl[] = {
	0x0, /* map I2C_MSM_XFER_MODE_FIFO -> binary 00 */
	0x1, /* map I2C_MSM_XFER_MODE_BLOCK -> binary 01 */
	0x3  /* map I2C_MSM_XFER_MODE_DMA -> binary 11 */
};

const char *i2c_msm_err_str_table[] = {
	[I2C_MSM_NO_ERR]           = "NONE",
	[I2C_MSM_ERR_NACK]         = "NACK: slave not responding, ensure it's powered",
	[I2C_MSM_ERR_ARB_LOST]     = "ARB_LOST",
	[I2C_MSM_ERR_BUS_ERR]      = "BUS ERROR: noisy bus/unexpected start/stop tag",
	[I2C_MSM_ERR_TIMEOUT]      = "TIMEOUT_ERROR",
	[I2C_MSM_ERR_CORE_CLK]     = "CLOCK OFF: Check Core Clock",
	[I2C_MSM_ERR_OVR_UNDR_RUN] = "OVER_UNDER_RUN_ERROR",
};

static void i2c_msm_dbg_dump_diag(struct i2c_msm_ctrl *ctrl,
				bool use_param_vals, u32 status, u32 qup_op)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	const char *str = i2c_msm_err_str_table[xfer->err];
	char buf[I2C_MSM_REG_2_STR_BUF_SZ];

	if (!use_param_vals) {
		void __iomem *base = ctrl->rsrcs.base;

		status = readl_relaxed(base + QUP_I2C_STATUS);
		qup_op = readl_relaxed(base + QUP_OPERATIONAL);
	}

	if (xfer->err == I2C_MSM_ERR_TIMEOUT) {
		/*
		 * If we are not the bus master or SDA/SCL is low then it may
		 * be that a slave is pulling the lines low. Otherwise it is
		 * likely a GPIO issue.
		 */
		if (!(status & QUP_BUS_MASTER))
			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
				"%s(val:%dmsec) misconfigured GPIO or slave pulling bus line(s) low\n",
				str, jiffies_to_msecs(xfer->timeout));
		else
			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
				"%s(val:%dmsec)", str,
				jiffies_to_msecs(xfer->timeout));

		str = buf;
	}

	/* dump xfer details */
	dev_err(ctrl->dev,
		"%s: msgs(n:%d cur:%d %s) bc(rx:%zu tx:%zu) mode:%s slv_addr:0x%0x MSTR_STS:0x%08x OPER:0x%08x\n",
		str, xfer->msg_cnt, xfer->cur_buf.msg_idx,
		xfer->cur_buf.is_rx ? "rx" : "tx", xfer->rx_cnt, xfer->tx_cnt,
		i2c_msm_mode_str_tbl[xfer->mode_id], xfer->msgs->addr,
		status, qup_op);
}

static u32 i2c_msm_reg_io_modes_out_blk_sz(u32 qup_io_modes)
{
	return i2c_msm_fifo_block_sz_tbl[qup_io_modes & 0x3];
}

static u32 i2c_msm_reg_io_modes_in_blk_sz(u32 qup_io_modes)
{
	return i2c_msm_fifo_block_sz_tbl[BITS_AT(qup_io_modes, 5, 2)];
}

static const u32 i2c_msm_fifo_sz_table[] = {2, 4, 8, 16};

static void i2c_msm_qup_fifo_calc_size(struct i2c_msm_ctrl *ctrl)
{
	u32 reg_data, output_fifo_size, input_fifo_size;
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;

	/* Guard to read the FIFO size only once. It is hard-wired and never
	 * changes.
	 */
	if (fifo->input_fifo_sz && fifo->output_fifo_sz)
		return;

	reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
	output_fifo_size = BITS_AT(reg_data, 2, 2);
	input_fifo_size = BITS_AT(reg_data, 7, 2);

	fifo->input_fifo_sz = i2c_msm_reg_io_modes_in_blk_sz(reg_data) *
					i2c_msm_fifo_sz_table[input_fifo_size];
	fifo->output_fifo_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data) *
					i2c_msm_fifo_sz_table[output_fifo_size];

	i2c_msm_dbg(ctrl, MSM_PROF, "QUP input-sz:%zu, output-sz:%zu\n",
			fifo->input_fifo_sz, fifo->output_fifo_sz);
}
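
/*
 * Worked example (illustrative; the register value is assumed): if
 * QUP_IO_MODES reads back with an output block-size field of 0x1
 * (16 byte blocks, bits 0-1) and an output FIFO-size field of 0x2
 * (multiplier 8 from i2c_msm_fifo_sz_table, bits 2-3), then:
 *
 *	output_fifo_sz = 16 * 8 = 128 bytes
 *
 * The input FIFO decodes the same way from bits 5-6 (block size) and
 * bits 7-8 (FIFO size).
 */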

/*
 * i2c_msm_tag_byte: accessor for tag as four bytes array
 */
static u8 *i2c_msm_tag_byte(struct i2c_msm_tag *tag, int byte_n)
{
	return ((u8 *)tag) + byte_n;
}

/*
 * i2c_msm_buf_to_ptr: translates a xfer buf to a pointer into the i2c_msg data
 */
static u8 *i2c_msm_buf_to_ptr(struct i2c_msm_xfer_buf *buf)
{
	struct i2c_msm_xfer *xfer =
				container_of(buf, struct i2c_msm_xfer, cur_buf);
	struct i2c_msg *msg = xfer->msgs + buf->msg_idx;

	return msg->buf + buf->byte_idx;
}

/*
 * tag_lookup_table[is_new_addr][is_last][is_rx]
 * @is_new_addr Is a start tag required? (it requires two more bytes.)
 * @is_last     Use the XXXXX_N_STOP tag variant
 * @is_rx       READ/WRITE
 */
static const struct i2c_msm_tag tag_lookup_table[2][2][2] = {
	{{{QUP_TAG2_DATA_WRITE, 2},
	  {QUP_TAG2_DATA_READ, 2} },
	/* last buffer */
	 {{QUP_TAG2_DATA_WRITE_N_STOP, 2},
	  {QUP_TAG2_DATA_READ_N_STOP, 2} } },
	/* new addr */
	{{{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE << 16), 4},
	  {QUP_TAG2_START | (QUP_TAG2_DATA_READ << 16), 4} },
	/* last buffer + new addr */
	 {{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE_N_STOP << 16), 4},
	  {QUP_TAG2_START | (QUP_TAG2_DATA_READ_N_STOP << 16), 4} } },
};

/*
 * i2c_msm_tag_create: format a qup tag ver2
 */
static struct i2c_msm_tag i2c_msm_tag_create(bool is_new_addr, bool is_last_buf,
					bool is_rx, u8 buf_len, u8 slave_addr)
{
	struct i2c_msm_tag tag;

	/* Normalize booleans to 1 or 0 */
	is_new_addr = is_new_addr ? 1 : 0;
	is_last_buf = is_last_buf ? 1 : 0;
	is_rx = is_rx ? 1 : 0;

	tag = tag_lookup_table[is_new_addr][is_last_buf][is_rx];
	/* fill in the non-const values: the address and the length */
	if (tag.len == I2C_MSM_TAG2_MAX_LEN) {
		*i2c_msm_tag_byte(&tag, 1) = slave_addr;
		*i2c_msm_tag_byte(&tag, 3) = buf_len;
	} else {
		*i2c_msm_tag_byte(&tag, 1) = buf_len;
	}

	return tag;
}
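
/*
 * Worked example (a sketch, not driver code; the address and length
 * values are assumed): a 2-byte read from slave address byte 0x52 that
 * both starts and ends a transfer uses tag_lookup_table[1][1][1], i.e.
 * QUP_TAG2_START | (QUP_TAG2_DATA_READ_N_STOP << 16) with len 4. After
 * i2c_msm_tag_create() fills in the variable bytes, the four tag bytes
 * (low to high) are:
 *
 *	byte 0: QUP_TAG2_START
 *	byte 1: 0x52 (slave address byte)
 *	byte 2: QUP_TAG2_DATA_READ_N_STOP
 *	byte 3: 0x02 (buffer length)
 */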

static int
i2c_msm_qup_state_wait_valid(struct i2c_msm_ctrl *ctrl,
			enum i2c_msm_qup_state state, bool only_valid)
{
	u32 status;
	void __iomem *base = ctrl->rsrcs.base;
	int ret = 0;
	int read_cnt = 0;

	do {
		status = readl_relaxed(base + QUP_STATE);
		++read_cnt;

		/*
		 * If only the valid bit needs to be checked, the requested
		 * state is 'don't care'
		 */
		if (status & QUP_STATE_VALID) {
			if (only_valid)
				goto poll_valid_end;
			else if ((state & QUP_I2C_MAST_GEN) &&
					(status & QUP_I2C_MAST_GEN))
				goto poll_valid_end;
			else if ((status & QUP_STATE_MASK) == state)
				goto poll_valid_end;
		}

		/*
		 * Sleep 1-1.5 ms after every 100 iterations and break when
		 * the iteration count crosses 1500. This allows roughly
		 * 15-22 msec of time for the core to reach a valid state.
		 */
		if (!(read_cnt % 100))
			usleep_range(1000, 1500);
	} while (read_cnt <= 1500);

	ret = -ETIMEDOUT;
	dev_err(ctrl->dev,
		"error timeout on polling for valid state. check core_clk\n");

poll_valid_end:
	if (!only_valid)
		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_VALID_END,
				/* aggregate ret and state */
				(((-ret) & 0xff) | ((state & 0xf) << 16)),
				read_cnt, status);

	return ret;
}

static int i2c_msm_qup_state_set(struct i2c_msm_ctrl *ctrl,
						enum i2c_msm_qup_state state)
{
	if (i2c_msm_qup_state_wait_valid(ctrl, 0, true))
		return -EIO;

	writel_relaxed(state, ctrl->rsrcs.base + QUP_STATE);

	if (i2c_msm_qup_state_wait_valid(ctrl, state, false))
		return -EIO;

	return 0;
}

static int i2c_msm_qup_sw_reset(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	writel_relaxed(1, ctrl->rsrcs.base + QUP_SW_RESET);
	/*
	 * Ensure that the reset is written before waiting for the reset
	 * state to become valid.
	 */
	wmb();
	ret = i2c_msm_qup_state_wait_valid(ctrl, QUP_STATE_RESET, false);
	if (ret) {
		if (atomic_read(&ctrl->xfer.is_active))
			ctrl->xfer.err = I2C_MSM_ERR_CORE_CLK;
		dev_err(ctrl->dev, "error on issuing QUP software-reset\n");
	}
	return ret;
}

/*
 * i2c_msm_qup_xfer_init_reset_state: setup QUP registers for the next run state
 * @pre QUP must be in reset state.
 * @pre xfer->mode_id is set to the chosen transfer mode
 * @post update values in QUP_MX_*_COUNT, QUP_CONFIG, QUP_IO_MODES,
 *       and QUP_OPERATIONAL_MASK registers
 */
static void
i2c_msm_qup_xfer_init_reset_state(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	void __iomem * const base = ctrl->rsrcs.base;
	u32 mx_rd_cnt  = 0;
	u32 mx_wr_cnt  = 0;
	u32 mx_in_cnt  = 0;
	u32 mx_out_cnt = 0;
	u32 no_input   = 0;
	u32 no_output  = 0;
	u32 input_mode  = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 12;
	u32 output_mode = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 10;
	u32 config_reg;
	u32 io_modes_reg;
	u32 op_mask;
	u32 rx_cnt = 0;
	u32 tx_cnt = 0;
	/*
	 * DMA mode:
	 * 1. QUP_MX_*_COUNT must be zero in all cases.
	 * 2. both QUP_NO_INPUT and QUP_NO_OUTPUT are unset.
	 * FIFO mode:
	 * 1. QUP_MX_INPUT_COUNT and QUP_MX_OUTPUT_COUNT are zero
	 * 2. QUP_MX_READ_COUNT and QUP_MX_WRITE_COUNT reflect true count
	 * 3. QUP_NO_INPUT and QUP_NO_OUTPUT are set according to counts
	 */
	if (xfer->mode_id != I2C_MSM_XFER_MODE_DMA) {
		rx_cnt = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
		tx_cnt = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
		no_input = rx_cnt ? 0 : QUP_NO_INPUT;

		switch (xfer->mode_id) {
		case I2C_MSM_XFER_MODE_FIFO:
			mx_rd_cnt = rx_cnt;
			mx_wr_cnt = tx_cnt;
			break;
		case I2C_MSM_XFER_MODE_BLOCK:
			mx_in_cnt = rx_cnt;
			mx_out_cnt = tx_cnt;
			break;
		default:
			break;
		}
	}

	/* init DMA/BLOCK modes counter */
	writel_relaxed(mx_in_cnt, base + QUP_MX_INPUT_COUNT);
	writel_relaxed(mx_out_cnt, base + QUP_MX_OUTPUT_COUNT);

	/* init FIFO mode counter */
	writel_relaxed(mx_rd_cnt, base + QUP_MX_READ_COUNT);
	writel_relaxed(mx_wr_cnt, base + QUP_MX_WRITE_COUNT);

	/*
	 * Set QUP mini-core to I2C tags ver-2
	 * sets NO_INPUT / NO_OUTPUT as needed
	 */
	config_reg = readl_relaxed(base + QUP_CONFIG);
	config_reg &=
	      ~(QUP_NO_INPUT | QUP_NO_OUTPUT | QUP_N_MASK | QUP_MINI_CORE_MASK);
	config_reg |= (no_input | no_output | QUP_N_VAL |
							QUP_MINI_CORE_I2C_VAL);
	writel_relaxed(config_reg, base + QUP_CONFIG);

	/*
	 * Turns on packing/unpacking
	 * sets NO_INPUT / NO_OUTPUT as needed
	 */
	io_modes_reg = readl_relaxed(base + QUP_IO_MODES);
	io_modes_reg &=
	   ~(QUP_INPUT_MODE | QUP_OUTPUT_MODE | QUP_PACK_EN | QUP_UNPACK_EN
						| QUP_OUTPUT_BIT_SHIFT_EN);
	io_modes_reg |=
	   (input_mode | output_mode | QUP_PACK_EN | QUP_UNPACK_EN);
	writel_relaxed(io_modes_reg, base + QUP_IO_MODES);

	/*
	 * Mask INPUT and OUTPUT service flags to prevent IRQs on FIFO status
	 * change on DMA-mode transfers
	 */
	op_mask = (xfer->mode_id == I2C_MSM_XFER_MODE_DMA) ?
		    (QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK) : 0;
	writel_relaxed(op_mask, base + QUP_OPERATIONAL_MASK);
	/* Ensure that QUP configuration is written before leaving this func */
	wmb();
}

/*
 * i2c_msm_clk_div_fld:
 * @clk_freq_out output clock frequency
 * @fs_div       fs divider value
 * @ht_div       high time divider value
 */
struct i2c_msm_clk_div_fld {
	u32 clk_freq_out;
	u8  fs_div;
	u8  ht_div;
};

/*
 * divider values as recommended by the HW designers
 */
static struct i2c_msm_clk_div_fld i2c_msm_clk_div_map[] = {
	{KHz(100), 124, 62},
	{KHz(400),  28, 14},
	{KHz(1000),  8,  5},
};

/*
 * @return zero on success
 * @fs_div when zero use value from table above, otherwise use given value
 * @ht_div when zero use value from table above, otherwise use given value
 *
 * Format the value to be configured into the clock divider register. This
 * register is configured every time the core is moved from reset to run state.
 */
static int i2c_msm_set_mstr_clk_ctl(struct i2c_msm_ctrl *ctrl, int fs_div,
			int ht_div, int noise_rjct_scl, int noise_rjct_sda)
{
	int ret = 0;
	int i;
	u32 reg_val = 0;
	struct i2c_msm_clk_div_fld *itr = i2c_msm_clk_div_map;

	/* set noise rejection values for scl and sda */
	reg_val = I2C_MSM_SCL_NOISE_REJECTION(reg_val, noise_rjct_scl);
	reg_val = I2C_MSM_SDA_NOISE_REJECTION(reg_val, noise_rjct_sda);

	/*
	 * find matching freq and set divider values unless they are forced
	 * from the parameter list
	 */
	for (i = 0; i < ARRAY_SIZE(i2c_msm_clk_div_map); ++i, ++itr) {
		if (ctrl->rsrcs.clk_freq_out == itr->clk_freq_out) {
			if (!fs_div)
				fs_div = itr->fs_div;
			if (!ht_div)
				ht_div = itr->ht_div;
			break;
		}
	}

	/* For non-standard clock freq, the clk divider value
	 * fs_div should be supplied by the client through device tree
	 */
	if (!fs_div) {
		dev_err(ctrl->dev, "Missing clk divider value in DT for %dKHz\n",
			(ctrl->rsrcs.clk_freq_out / 1000));
		return -EINVAL;
	}

	/* format values in clk-ctl cache */
	ctrl->mstr_clk_ctl = (reg_val & (~0xff07ff)) | ((ht_div & 0xff) << 16)
							| (fs_div & 0xff);

	return ret;
}
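
/*
 * Worked example (illustrative, ignoring the noise rejection bits): for
 * a 400 KHz bus the table above gives fs_div = 28 (0x1c) and
 * ht_div = 14 (0x0e), so the cached value is
 *
 *	mstr_clk_ctl = (0x0e << 16) | 0x1c = 0x000e001c
 *
 * which i2c_msm_qup_xfer_init_run_state() below later writes to
 * QUP_I2C_MASTER_CLK_CTL.
 */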

/*
 * i2c_msm_qup_xfer_init_run_state: set qup regs which must be set *after* reset
 */
static void i2c_msm_qup_xfer_init_run_state(struct i2c_msm_ctrl *ctrl)
{
	void __iomem *base = ctrl->rsrcs.base;

	writel_relaxed(ctrl->mstr_clk_ctl, base + QUP_I2C_MASTER_CLK_CTL);

	/* Ensure that QUP configuration is written before leaving this func */
	wmb();

	if (ctrl->dbgfs.dbg_lvl == MSM_DBG) {
		dev_info(ctrl->dev,
			"QUP state after programming for next transfers\n");
		i2c_msm_dbg_qup_reg_dump(ctrl);
	}
}

static void i2c_msm_fifo_wr_word(struct i2c_msm_ctrl *ctrl, u32 data)
{
	writel_relaxed(data, ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-FIFO:0x%08x\n", data);
}

static u32 i2c_msm_fifo_rd_word(struct i2c_msm_ctrl *ctrl, u32 *data)
{
	u32 val;

	val = readl_relaxed(ctrl->rsrcs.base + QUP_IN_FIFO_BASE);
	i2c_msm_dbg(ctrl, MSM_DBG, "IN-FIFO :0x%08x\n", val);

	if (data)
		*data = val;

	return val;
}

/*
 * i2c_msm_fifo_wr_buf_flush: write the partially filled out_buf word to HW
 */
static void i2c_msm_fifo_wr_buf_flush(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
	u32 *word;

	if (!fifo->out_buf_idx)
		return;

	word = (u32 *) fifo->out_buf;
	i2c_msm_fifo_wr_word(ctrl, *word);
	fifo->out_buf_idx = 0;
	*word = 0;
}

/*
 * i2c_msm_fifo_wr_buf:
 *
 * @len buf size (in bytes)
 * @return number of bytes from buf which have been processed (written to
 *         FIFO or kept in out buffer and will be written later)
 */
static size_t
i2c_msm_fifo_wr_buf(struct i2c_msm_ctrl *ctrl, u8 *buf, size_t len)
{
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
	int i;

	for (i = 0; i < len; ++i, ++buf) {

		fifo->out_buf[fifo->out_buf_idx] = *buf;
		++fifo->out_buf_idx;

		if (fifo->out_buf_idx == 4) {
			u32 *word = (u32 *) fifo->out_buf;

			i2c_msm_fifo_wr_word(ctrl, *word);
			fifo->out_buf_idx = 0;
			*word = 0;
		}
	}
	return i;
}
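
/*
 * Example (a sketch; byte values assumed, little-endian CPU): writing
 * the 6 bytes {0x11, 0x22, 0x33, 0x44, 0x55, 0x66} pushes one packed
 * word 0x44332211 to the output FIFO and leaves {0x55, 0x66} in
 * fifo->out_buf (out_buf_idx == 2) until a later call fills the word
 * or i2c_msm_fifo_wr_buf_flush() pushes the partial word.
 */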

static size_t i2c_msm_fifo_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	size_t len = 0;

	if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
		char str[I2C_MSM_REG_2_STR_BUF_SZ];

		dev_info(ctrl->dev, "tag.val:0x%llx tag.len:%d %s\n",
			buf->out_tag.val, buf->out_tag.len,
			i2c_msm_dbg_tag_to_str(&buf->out_tag, str,
								sizeof(str)));
	}

	if (buf->out_tag.len) {
		len = i2c_msm_fifo_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
							buf->out_tag.len);

		if (len < buf->out_tag.len)
			goto done;

		buf->out_tag = (struct i2c_msm_tag) {0};
	}
done:
	return len;
}

/*
 * i2c_msm_fifo_read_xfer_buf: reads up to fifo size into user's buf
 */
static void i2c_msm_fifo_read_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msg *msg = ctrl->xfer.msgs + buf->msg_idx;
	u8 *p_tag_val = (u8 *) &buf->in_tag.val;
	int buf_need_bc = msg->len - buf->byte_idx;
	u8 word[4];
	int copy_bc;
	int word_idx;
	int word_bc;

	if (!buf->is_rx)
		return;

	while (buf_need_bc || buf->in_tag.len) {
		i2c_msm_fifo_rd_word(ctrl, (u32 *) word);
		word_bc = sizeof(word);
		word_idx = 0;

		/*
		 * copy bytes from fifo word to tag.
		 * @note buf->in_tag.len (max 2 bytes) < word_bc (4 bytes)
		 */
		if (buf->in_tag.len) {
			copy_bc = min_t(int, word_bc, buf->in_tag.len);

			memcpy(p_tag_val + buf->in_tag.len, word, copy_bc);

			word_idx += copy_bc;
			word_bc -= copy_bc;
			buf->in_tag.len -= copy_bc;

			if ((ctrl->dbgfs.dbg_lvl >= MSM_DBG) &&
							!buf->in_tag.len) {
				char str[64];

				dev_info(ctrl->dev, "%s\n",
					i2c_msm_dbg_tag_to_str(&buf->in_tag,
							str, sizeof(str)));
			}
		}

		/* copy bytes from fifo word to user's buffer */
		copy_bc = min_t(int, word_bc, buf_need_bc);
		memcpy(msg->buf + buf->byte_idx, word + word_idx, copy_bc);

		buf->byte_idx += copy_bc;
		buf_need_bc -= copy_bc;
	}
}

/*
 * i2c_msm_fifo_write_xfer_buf: write xfer.cur_buf (user's-buf + tag) to fifo
 */
static void i2c_msm_fifo_write_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	size_t len;
	size_t tag_len;

	tag_len = buf->out_tag.len;
	len = i2c_msm_fifo_xfer_wr_tag(ctrl);
	if (len < tag_len) {
		dev_err(ctrl->dev, "error on writing tag to out FIFO\n");
		return;
	}

	if (!buf->is_rx) {
		if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
			char str[I2C_MSM_REG_2_STR_BUF_SZ];
			int offset = 0;
			u8 *p = i2c_msm_buf_to_ptr(buf);
			int i;

			for (i = 0; i < len; ++i, ++p)
				offset += scnprintf(str + offset,
						    sizeof(str) - offset,
						    "0x%x ", *p);
			dev_info(ctrl->dev, "data: %s\n", str);
		}

		len = i2c_msm_fifo_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf),
							buf->len);
		if (len < buf->len)
			dev_err(ctrl->dev, "error on xfering buf with FIFO\n");
	}
}

/*
 * i2c_msm_fifo_xfer_process:
 *
 * @pre transfer size is less than or equal to fifo size.
 * @pre QUP is in run/pause state
 * @return zero on success
 */
static int i2c_msm_fifo_xfer_process(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf first_buf = ctrl->xfer.cur_buf;
	int ret;

	/* load fifo while in pause state to avoid race conditions */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
	if (ret < 0)
		return ret;

	/* write all that goes to the output fifo */
	while (i2c_msm_xfer_next_buf(ctrl))
		i2c_msm_fifo_write_xfer_buf(ctrl);

	i2c_msm_fifo_wr_buf_flush(ctrl);

	ctrl->xfer.cur_buf = first_buf;

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	/* wait for input done interrupt */
	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
	if (ret < 0)
		return ret;

	/* read all from the input fifo */
	while (i2c_msm_xfer_next_buf(ctrl))
		i2c_msm_fifo_read_xfer_buf(ctrl);

	return 0;
}

/*
 * i2c_msm_fifo_xfer: process transfer using fifo mode
 */
static int i2c_msm_fifo_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	i2c_msm_dbg(ctrl, MSM_DBG, "Starting FIFO transfer\n");

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		return ret;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	ret = i2c_msm_fifo_xfer_process(ctrl);

	return ret;
}

/*
 * i2c_msm_blk_init_struct: Allocate memory and initialize blk structure
 *
 * @return 0 on success or error code
 */
static int i2c_msm_blk_init_struct(struct i2c_msm_ctrl *ctrl)
{
	u32 reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
	int ret;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;

	blk->in_blk_sz = i2c_msm_reg_io_modes_in_blk_sz(reg_data);
	blk->out_blk_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data);

	blk->tx_cache = kmalloc(blk->out_blk_sz, GFP_KERNEL);
	if (!blk->tx_cache) {
		ret = -ENOMEM;
		goto out_buf_err;
	}

	blk->rx_cache = kmalloc(blk->in_blk_sz, GFP_KERNEL);
	if (!blk->rx_cache) {
		ret = -ENOMEM;
		goto in_buf_err;
	}

	blk->is_init = true;
	return 0;

in_buf_err:
	kfree(blk->tx_cache);
out_buf_err:

	return ret;
}

/*
 * i2c_msm_blk_wr_flush: flushes internal cached block to FIFO
 *
 * @return 0 on success or error code
 */
static int i2c_msm_blk_wr_flush(struct i2c_msm_ctrl *ctrl)
{
	int byte_num;
	int ret = 0;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	u32 *buf_u32_ptr;

	if (!blk->tx_cache_idx)
		return 0;

	/* if no blocks are available, wait for an interrupt */
	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_tx_blk);
	if (ret)
		return ret;

	/*
	 * pause the controller until we finish loading the block in order to
	 * avoid race conditions
	 */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
	if (ret < 0)
		return ret;
	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-BLK:%*phC\n", blk->tx_cache_idx,
							blk->tx_cache);

	for (byte_num = 0; byte_num < blk->tx_cache_idx;
						byte_num += sizeof(u32)) {
		buf_u32_ptr = (u32 *) (blk->tx_cache + byte_num);
		writel_relaxed(*buf_u32_ptr,
					ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
		*buf_u32_ptr = 0;
	}

	/* now cache is empty */
	blk->tx_cache_idx = 0;
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	return ret;
}

/*
 * i2c_msm_blk_wr_buf:
 *
 * @len buf size (in bytes)
 * @return number of bytes from buf which have been processed (written to
 *         FIFO or kept in out buffer and will be written later)
 */
static int
i2c_msm_blk_wr_buf(struct i2c_msm_ctrl *ctrl, const u8 *buf, int len)
{
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	int byte_num;
	int ret = 0;

	for (byte_num = 0; byte_num < len; ++byte_num, ++buf) {
		blk->tx_cache[blk->tx_cache_idx] = *buf;
		++blk->tx_cache_idx;

		/* flush cached buffer to HW FIFO when full */
		if (blk->tx_cache_idx == blk->out_blk_sz) {
			ret = i2c_msm_blk_wr_flush(ctrl);
			if (ret)
				return ret;
		}
	}
	return byte_num;
}

/*
 * i2c_msm_blk_xfer_wr_tag: buffered writing of the tag of the current buf
 * @return zero on success
 */
static int i2c_msm_blk_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	int len = 0;

	if (!buf->out_tag.len)
		return 0;

	len = i2c_msm_blk_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
							buf->out_tag.len);
	if (len != buf->out_tag.len)
		return -EFAULT;

	buf->out_tag = (struct i2c_msm_tag) {0};
	return 0;
}

/*
 * i2c_msm_blk_wr_xfer_buf: writes ctrl->xfer.cur_buf to HW
 *
 * @return zero on success
 */
static int i2c_msm_blk_wr_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	int len;
	int ret;

	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
	if (ret)
		return ret;

	len = i2c_msm_blk_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf), buf->len);
	if (len < buf->len)
		return -EFAULT;

	buf->byte_idx += len;
	return 0;
}

/*
 * i2c_msm_blk_rd_blk: read a block from HW FIFO to internal cache
 *
 * @return number of bytes read or negative error value
 * @need_bc number of bytes that we need
 *
 * uses an internal counter to keep track of the number of available blocks.
 * When zero, waits for an interrupt.
 */
static int i2c_msm_blk_rd_blk(struct i2c_msm_ctrl *ctrl, int need_bc)
{
	int byte_num;
	int ret = 0;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	u32 *cache_ptr = (u32 *) blk->rx_cache;
	int read_bc = min_t(int, blk->in_blk_sz, need_bc);

	/* wait for a block-available interrupt */
	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_rx_blk);
	if (ret)
		return ret;

	/* Read block from HW to cache */
	for (byte_num = 0; byte_num < blk->in_blk_sz;
					byte_num += sizeof(u32)) {
		if (byte_num < read_bc) {
			*cache_ptr = readl_relaxed(ctrl->rsrcs.base +
							QUP_IN_FIFO_BASE);
			++cache_ptr;
		}
	}
	blk->rx_cache_idx = 0;
	return read_bc;
}

/*
 * i2c_msm_blk_rd_xfer_buf: fill in ctrl->xfer.cur_buf from HW
 *
 * @return zero on success
 */
static int i2c_msm_blk_rd_xfer_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msg *msg = ctrl->xfer.msgs + buf->msg_idx;
	int copy_bc; /* number of bytes to copy to user's buffer */
	int cache_avail_bc;
	int ret = 0;

	/* write tag to out FIFO */
	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
	if (ret)
		return ret;
	i2c_msm_blk_wr_flush(ctrl);

	while (buf->len || buf->in_tag.len) {
		cache_avail_bc = i2c_msm_blk_rd_blk(ctrl,
						buf->len + buf->in_tag.len);

		i2c_msm_dbg(ctrl, MSM_DBG, "IN-BLK:%*phC\n", cache_avail_bc,
					blk->rx_cache + blk->rx_cache_idx);

		if (cache_avail_bc < 0)
			return cache_avail_bc;

		/* discard tag from input FIFO */
		if (buf->in_tag.len) {
			int discard_bc = min_t(int, cache_avail_bc,
							buf->in_tag.len);
			blk->rx_cache_idx += discard_bc;
			buf->in_tag.len -= discard_bc;
			cache_avail_bc -= discard_bc;
		}

		/* copy bytes from the cached block to user's buffer */
		copy_bc = min_t(int, cache_avail_bc, buf->len);
		memcpy(msg->buf + buf->byte_idx,
			blk->rx_cache + blk->rx_cache_idx, copy_bc);

		blk->rx_cache_idx += copy_bc;
		buf->len -= copy_bc;
		buf->byte_idx += copy_bc;
	}
	return ret;
}

/*
 * i2c_msm_blk_xfer: process transfer using block mode
 */
static int i2c_msm_blk_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret = 0;
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;

	if (!blk->is_init) {
		ret = i2c_msm_blk_init_struct(ctrl);
		if (!blk->is_init)
			return ret;
	}

	init_completion(&blk->wait_rx_blk);
	init_completion(&blk->wait_tx_blk);

	/* tx_cnt > 0 always */
	blk->complete_mask = QUP_MAX_OUTPUT_DONE_FLAG;
	if (ctrl->xfer.rx_cnt)
		blk->complete_mask |= QUP_MAX_INPUT_DONE_FLAG;

	/* initialize block mode for new transfer */
	blk->tx_cache_idx = 0;
	blk->rx_cache_idx = 0;

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		return ret;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		return ret;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	while (i2c_msm_xfer_next_buf(ctrl)) {
		if (buf->is_rx) {
			ret = i2c_msm_blk_rd_xfer_buf(ctrl);
			if (ret)
				return ret;
			/*
			 * SW workaround to wait for an extra interrupt from
			 * hardware for the last block in block-mode reads
			 */
			if (buf->is_last) {
				ret = i2c_msm_xfer_wait_for_completion(ctrl,
							&blk->wait_rx_blk);
				if (!ret)
					complete(&ctrl->xfer.complete);
			}
		} else {
			ret = i2c_msm_blk_wr_xfer_buf(ctrl);
			if (ret)
				return ret;
		}
	}
	i2c_msm_blk_wr_flush(ctrl);
	return i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
}

/*
 * i2c_msm_dma_xfer_prepare: map DMA buffers, and create tags.
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_xfer_prepare(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
	struct i2c_msm_dma_chan *tx = &dma->chan[I2C_MSM_DMA_TX];
	struct i2c_msm_dma_chan *rx = &dma->chan[I2C_MSM_DMA_RX];
	struct i2c_msm_dma_buf *dma_buf;
	int rem_buf_cnt = I2C_MSM_DMA_DESC_ARR_SIZ;
	struct i2c_msg *cur_msg;
	enum dma_data_direction buf_dma_dirctn;
	struct i2c_msm_dma_mem data;
	u8 *tag_arr_itr_vrtl_addr;
	dma_addr_t tag_arr_itr_phy_addr;

	tx->desc_cnt_cur = 0;
	rx->desc_cnt_cur = 0;
	dma->buf_arr_cnt = 0;
	dma_buf = dma->buf_arr;
	tag_arr_itr_vrtl_addr = ((u8 *) dma->tag_arr.vrtl_addr);
	tag_arr_itr_phy_addr = dma->tag_arr.phy_addr;

	for (; i2c_msm_xfer_next_buf(ctrl) && rem_buf_cnt;
		++dma_buf,
		tag_arr_itr_phy_addr += sizeof(dma_addr_t),
		tag_arr_itr_vrtl_addr += sizeof(dma_addr_t)) {

		/* dma-map the client's message */
		cur_msg = ctrl->xfer.msgs + buf->msg_idx;
		data.vrtl_addr = cur_msg->buf + buf->byte_idx;
		if (buf->is_rx) {
			buf_dma_dirctn = DMA_FROM_DEVICE;
			rx->desc_cnt_cur += 2; /* msg + tag */
			tx->desc_cnt_cur += 1; /* tag */
		} else {
			buf_dma_dirctn = DMA_TO_DEVICE;
			tx->desc_cnt_cur += 2; /* msg + tag */
		}

		/* for the last buffer in a transfer msg */
		if (buf->is_last) {
			/* add overhead byte cnt for tags specific to DMA mode */
			ctrl->xfer.rx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags */
			ctrl->xfer.tx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags */

			/* increment rx desc cnt to read off tags and
			 * increment tx desc cnt to queue EOT+FLUSH_STOP tags
			 */
			tx->desc_cnt_cur++;
			rx->desc_cnt_cur++;
		}

		if ((rx->desc_cnt_cur >= I2C_MSM_DMA_RX_SZ) ||
		    (tx->desc_cnt_cur >= I2C_MSM_DMA_TX_SZ))
			return -ENOMEM;

		data.phy_addr = dma_map_single(ctrl->dev, data.vrtl_addr,
						buf->len, buf_dma_dirctn);

		if (dma_mapping_error(ctrl->dev, data.phy_addr)) {
			dev_err(ctrl->dev,
			"error DMA mapping DMA buffers, err:%lld buf_vrtl:0x%pK data_len:%d dma_dir:%s\n",
			(u64) data.phy_addr, data.vrtl_addr, buf->len,
			((buf_dma_dirctn == DMA_FROM_DEVICE)
				? "DMA_FROM_DEVICE" : "DMA_TO_DEVICE"));
			return -EFAULT;
		}

		/* copy 8 bytes. Only tag.len bytes will be used */
		*((u64 *)tag_arr_itr_vrtl_addr) = buf->out_tag.val;

		i2c_msm_dbg(ctrl, MSM_DBG,
			"vrtl:0x%pK phy:0x%llx val:0x%llx sizeof(dma_addr_t):%zu\n",
			tag_arr_itr_vrtl_addr, (u64) tag_arr_itr_phy_addr,
			*((u64 *)tag_arr_itr_vrtl_addr), sizeof(dma_addr_t));

		/*
		 * create a dma buf, in the dma buf arr, based on the buf
		 * created by i2c_msm_xfer_next_buf()
		 */
		*dma_buf = (struct i2c_msm_dma_buf) {
			.ptr     = data,
			.len     = buf->len,
			.dma_dir = buf_dma_dirctn,
			.is_rx   = buf->is_rx,
			.is_last = buf->is_last,
			.tag     = (struct i2c_msm_dma_tag) {
				.buf = tag_arr_itr_phy_addr,
				.len = buf->out_tag.len,
			},
		};
		++dma->buf_arr_cnt;
		--rem_buf_cnt;
	}
	return 0;
}
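
/*
 * Descriptor accounting example (illustrative): a transfer of one write
 * buffer followed by one read buffer (the read being last) yields
 *
 *	tx: 2 (wr msg + tag) + 1 (rd tag) + 1 (EOT+FLUSH_STOP tags) = 4
 *	rx: 2 (rd msg + input tag)        + 1 (final input tag)     = 3
 *
 * matching the desc_cnt_cur increments above.
 */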

/*
 * i2c_msm_dma_xfer_unprepare: DMA unmap buffers.
 */
static void i2c_msm_dma_xfer_unprepare(struct i2c_msm_ctrl *ctrl)
{
	int i;
	struct i2c_msm_dma_buf *buf_itr = ctrl->xfer.dma.buf_arr;

	for (i = 0; i < ctrl->xfer.dma.buf_arr_cnt; ++i, ++buf_itr)
		dma_unmap_single(ctrl->dev, buf_itr->ptr.phy_addr, buf_itr->len,
							buf_itr->dma_dir);
}

static void i2c_msm_dma_callback_tx_complete(void *dma_async_param)
{
	struct i2c_msm_ctrl *ctrl = dma_async_param;

	complete(&ctrl->xfer.complete);
}

static void i2c_msm_dma_callback_rx_complete(void *dma_async_param)
{
	struct i2c_msm_ctrl *ctrl = dma_async_param;

	complete(&ctrl->xfer.rx_complete);
}

/*
 * i2c_msm_dma_xfer_process: Queue transfers to DMA
 * @pre 1) QUP is in run state. 2) i2c_msm_dma_xfer_prepare() was called.
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	struct i2c_msm_dma_chan *tx = &dma->chan[I2C_MSM_DMA_TX];
	struct i2c_msm_dma_chan *rx = &dma->chan[I2C_MSM_DMA_RX];
	struct scatterlist *sg_rx = NULL;
	struct scatterlist *sg_rx_itr = NULL;
	struct scatterlist *sg_tx = NULL;
	struct scatterlist *sg_tx_itr = NULL;
	struct dma_async_tx_descriptor *dma_desc_rx;
	struct dma_async_tx_descriptor *dma_desc_tx;
	struct i2c_msm_dma_buf *buf_itr;
	int i;
	int ret = 0;

	i2c_msm_dbg(ctrl, MSM_DBG, "Going to enqueue %zu buffers in DMA\n",
							dma->buf_arr_cnt);

	/* Set the QUP State to pause while DMA completes the txn */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
	if (ret) {
		dev_err(ctrl->dev, "transition to pause state failed before DMA transaction :%d\n",
									ret);
		return ret;
	}

	sg_tx = kcalloc(tx->desc_cnt_cur, sizeof(struct scatterlist),
								GFP_KERNEL);
	if (!sg_tx) {
		ret = -ENOMEM;
		goto dma_xfer_end;
	}
	sg_init_table(sg_tx, tx->desc_cnt_cur);
	sg_tx_itr = sg_tx;

	sg_rx = kcalloc(rx->desc_cnt_cur, sizeof(struct scatterlist),
								GFP_KERNEL);
	if (!sg_rx) {
		ret = -ENOMEM;
		goto dma_xfer_end;
	}
	sg_init_table(sg_rx, rx->desc_cnt_cur);
	sg_rx_itr = sg_rx;

	buf_itr = dma->buf_arr;

	for (i = 0; i < dma->buf_arr_cnt; ++i, ++buf_itr) {
		/* Queue tag */
		sg_dma_address(sg_tx_itr) = buf_itr->tag.buf;
		sg_dma_len(sg_tx_itr) = buf_itr->tag.len;
		++sg_tx_itr;

		/* read off tag + len bytes (don't care) from the input FIFO
		 * on read transfers
		 */
		if (buf_itr->is_rx) {
			/* get rid of the input tag */
			sg_dma_address(sg_rx_itr) =
					ctrl->xfer.dma.input_tag.phy_addr;
			sg_dma_len(sg_rx_itr) = QUP_BUF_OVERHD_BC;
			++sg_rx_itr;

			/* queue data buffer */
			sg_dma_address(sg_rx_itr) = buf_itr->ptr.phy_addr;
			sg_dma_len(sg_rx_itr) = buf_itr->len;
			++sg_rx_itr;
		} else {
			sg_dma_address(sg_tx_itr) = buf_itr->ptr.phy_addr;
			sg_dma_len(sg_tx_itr) = buf_itr->len;
			++sg_tx_itr;
		}
	}

	/* this tag will be copied to the rx fifo */
	sg_dma_address(sg_tx_itr) = dma->eot_n_flush_stop_tags.phy_addr;
	sg_dma_len(sg_tx_itr) = QUP_BUF_OVERHD_BC;
	++sg_tx_itr;

	/*
	 * Reading the tag off the input fifo has side effects and
	 * it is mandatory for getting the DMA's interrupt.
	 */
	sg_dma_address(sg_rx_itr) = ctrl->xfer.dma.input_tag.phy_addr;
	sg_dma_len(sg_rx_itr) = QUP_BUF_OVERHD_BC;
	++sg_rx_itr;

	/*
	 * We only want a single BAM interrupt per transfer, and we always
	 * add a flush-stop i2c tag as the last tx sg entry. Since the dma
	 * driver puts the supplied BAM flags only on the last BAM descriptor,
	 * the flush stop will always be the one which generates that interrupt
	 * and invokes the callback.
	 */
	dma_desc_tx = dmaengine_prep_slave_sg(tx->dma_chan,
					      sg_tx,
					      sg_tx_itr - sg_tx,
					      tx->dir,
					      (SPS_IOVEC_FLAG_EOT |
						SPS_IOVEC_FLAG_NWD));
	if (IS_ERR_OR_NULL(dma_desc_tx)) {
		dev_err(ctrl->dev, "error dmaengine_prep_slave_sg tx:%ld\n",
							PTR_ERR(dma_desc_tx));
		ret = dma_desc_tx ? PTR_ERR(dma_desc_tx) : -ENOMEM;
		goto dma_xfer_end;
	}

	/* callback defined for tx dma desc */
	dma_desc_tx->callback = i2c_msm_dma_callback_tx_complete;
	dma_desc_tx->callback_param = ctrl;
	dmaengine_submit(dma_desc_tx);
	dma_async_issue_pending(tx->dma_chan);

	/* queue the rx dma desc */
	dma_desc_rx = dmaengine_prep_slave_sg(rx->dma_chan, sg_rx,
					sg_rx_itr - sg_rx, rx->dir,
					(SPS_IOVEC_FLAG_EOT |
						SPS_IOVEC_FLAG_NWD));
	if (IS_ERR_OR_NULL(dma_desc_rx)) {
		dev_err(ctrl->dev,
			"error dmaengine_prep_slave_sg rx:%ld\n",
						PTR_ERR(dma_desc_rx));
		ret = dma_desc_rx ? PTR_ERR(dma_desc_rx) : -ENOMEM;
		goto dma_xfer_end;
	}

	dma_desc_rx->callback = i2c_msm_dma_callback_rx_complete;
	dma_desc_rx->callback_param = ctrl;
	dmaengine_submit(dma_desc_rx);
	dma_async_issue_pending(rx->dma_chan);

	/* Set the QUP State to Run to complete the txn */
	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret) {
		dev_err(ctrl->dev, "transition to run state failed before DMA transaction :%d\n",
									ret);
		goto dma_xfer_end;
	}

	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
	if (!ret && ctrl->xfer.rx_cnt)
		ret = i2c_msm_xfer_wait_for_completion(ctrl,
						&ctrl->xfer.rx_complete);

dma_xfer_end:
	/* free scatter-gather lists */
	kfree(sg_tx);
	kfree(sg_rx);

	return ret;
}
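
/*
 * Scatter-gather layout example (illustrative) for one write buffer
 * followed by one read buffer:
 *
 *	tx sg: [wr tag][wr data][rd tag][EOT+FLUSH_STOP tags]
 *	rx sg: [discard input tag][rd data][final input tag]
 *
 * Only the last descriptor of each list carries the EOT/NWD flags, so
 * each direction raises a single interrupt when its list drains.
 */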

static void i2c_msm_dma_free_channels(struct i2c_msm_ctrl *ctrl)
{
	int i;

	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];

		if (!chan->is_init)
			continue;

		dma_release_channel(chan->dma_chan);
		chan->is_init = false;
		chan->dma_chan = NULL;
	}
	if (ctrl->xfer.dma.state > I2C_MSM_DMA_INIT_CORE)
		ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CORE;
}

static const char * const i2c_msm_dma_chan_name[] = {"tx", "rx"};

static int i2c_msm_dmaengine_dir[] = {
	DMA_MEM_TO_DEV, DMA_DEV_TO_MEM
};

static int i2c_msm_dma_init_channels(struct i2c_msm_ctrl *ctrl)
{
	int ret = 0;
	int i;

	/* Iterate over the dma channels to initialize them */
	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
		struct dma_slave_config cfg = {0};
		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];

		if (chan->is_init)
			continue;

		chan->name = i2c_msm_dma_chan_name[i];
		chan->dma_chan = dma_request_slave_channel(ctrl->dev,
								chan->name);
		if (!chan->dma_chan) {
			dev_err(ctrl->dev,
				"error dma_request_slave_channel(dev:%s chan:%s)\n",
				dev_name(ctrl->dev), chan->name);
			/* free the channels if allocated before */
			i2c_msm_dma_free_channels(ctrl);
			return -ENODEV;
		}

		chan->dir = cfg.direction = i2c_msm_dmaengine_dir[i];
		ret = dmaengine_slave_config(chan->dma_chan, &cfg);
		if (ret) {
			dev_err(ctrl->dev,
				"error:%d dmaengine_slave_config(chan:%s)\n",
				ret, chan->name);
			dma_release_channel(chan->dma_chan);
			chan->dma_chan = NULL;
			i2c_msm_dma_free_channels(ctrl);
			return ret;
		}
		chan->is_init = true;
	}
	ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CHAN;
	return 0;
}

static void i2c_msm_dma_teardown(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;

	i2c_msm_dma_free_channels(ctrl);

	if (dma->state > I2C_MSM_DMA_INIT_NONE)
		dma_free_coherent(ctrl->dev, I2C_MSM_DMA_TAG_MEM_SZ,
					dma->input_tag.vrtl_addr,
					dma->input_tag.phy_addr);

	dma->state = I2C_MSM_DMA_INIT_NONE;
}

static int i2c_msm_dma_init(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	u8 *tags_space_virt_addr;
	dma_addr_t tags_space_phy_addr;

	/* check if DMA core is initialized */
	if (dma->state > I2C_MSM_DMA_INIT_NONE)
		goto dma_core_is_init;

	/*
	 * allocate dma memory for input_tag + eot_n_flush_stop_tags + tag_arr
	 * for more see: I2C_MSM_DMA_TAG_MEM_SZ definition
	 */
	tags_space_virt_addr = dma_alloc_coherent(
						ctrl->dev,
						I2C_MSM_DMA_TAG_MEM_SZ,
						&tags_space_phy_addr,
						GFP_KERNEL);
	if (!tags_space_virt_addr) {
		dev_err(ctrl->dev,
		"error alloc %d bytes of DMAable memory for DMA tags space\n",
						I2C_MSM_DMA_TAG_MEM_SZ);
		return -ENOMEM;
	}

	/*
	 * set the dma-tags virtual and physical addresses:
	 * 1) the first tag space is for the input (throw away) tag
	 */
	dma->input_tag.vrtl_addr = tags_space_virt_addr;
	dma->input_tag.phy_addr = tags_space_phy_addr;

	/* 2) second tag space is for the eot_flush_stop tag, a const value */
	tags_space_virt_addr += I2C_MSM_TAG2_MAX_LEN;
	tags_space_phy_addr += I2C_MSM_TAG2_MAX_LEN;
	dma->eot_n_flush_stop_tags.vrtl_addr = tags_space_virt_addr;
	dma->eot_n_flush_stop_tags.phy_addr = tags_space_phy_addr;

	/* set the eot_n_flush_stop_tags value */
	*((u16 *) dma->eot_n_flush_stop_tags.vrtl_addr) =
				QUP_TAG2_INPUT_EOT | (QUP_TAG2_FLUSH_STOP << 8);

	/* 3) all other tag spaces are used for transfer tags */
	tags_space_virt_addr += I2C_MSM_TAG2_MAX_LEN;
	tags_space_phy_addr += I2C_MSM_TAG2_MAX_LEN;
	dma->tag_arr.vrtl_addr = tags_space_virt_addr;
	dma->tag_arr.phy_addr = tags_space_phy_addr;
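
	/*
	 * Resulting layout of the coherent tag memory (illustrative):
	 *
	 *	offset 0                        : input (throw-away) tag
	 *	offset I2C_MSM_TAG2_MAX_LEN     : EOT + FLUSH_STOP tags
	 *	offset 2 * I2C_MSM_TAG2_MAX_LEN : per-buffer out tags (tag_arr)
	 */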

	dma->state = I2C_MSM_DMA_INIT_CORE;

dma_core_is_init:
	return i2c_msm_dma_init_channels(ctrl);
}

static int i2c_msm_dma_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	ret = i2c_msm_dma_init(ctrl);
	if (ret) {
		dev_err(ctrl->dev, "DMA Init Failed: %d\n", ret);
		return ret;
	}

	/* dma map user's buffers and create tags */
	ret = i2c_msm_dma_xfer_prepare(ctrl);
	if (ret < 0) {
		dev_err(ctrl->dev, "error on i2c_msm_dma_xfer_prepare():%d\n",
									ret);
		goto err_dma_xfer;
	}

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		goto err_dma_xfer;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		goto err_dma_xfer;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	/* enqueue transfer buffers */
	ret = i2c_msm_dma_xfer_process(ctrl);
	if (ret)
		dev_err(ctrl->dev,
			"error i2c_msm_dma_xfer_process(n_bufs:%zu):%d\n",
			ctrl->xfer.dma.buf_arr_cnt, ret);

err_dma_xfer:
	i2c_msm_dma_xfer_unprepare(ctrl);
	return ret;
}

/*
 * i2c_msm_qup_slv_holds_bus: true when a slave holds SDA low
 */
static bool i2c_msm_qup_slv_holds_bus(struct i2c_msm_ctrl *ctrl)
{
	u32 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);

	bool slv_holds_bus = !(status & QUP_I2C_SDA) &&
				(status & QUP_BUS_ACTIVE) &&
				!(status & QUP_BUS_MASTER);
	if (slv_holds_bus)
		dev_info(ctrl->dev,
			"bus lines held low by a slave detected\n");

	return slv_holds_bus;
}

/*
 * i2c_msm_qup_poll_bus_active_unset: poll until QUP_BUS_ACTIVE is unset
 *
 * @return zero when bus inactive, or nonzero on timeout.
 *
 * Loop and read QUP_I2C_STATUS until the bus is inactive or the timeout is
 * reached. Used to avoid a race condition due to the gap between the QUP
 * completion interrupt and QUP issuing the stop signal on the bus.
 */
static int i2c_msm_qup_poll_bus_active_unset(struct i2c_msm_ctrl *ctrl)
{
	void __iomem *base = ctrl->rsrcs.base;
	ulong timeout = jiffies + msecs_to_jiffies(I2C_MSM_MAX_POLL_MSEC);
	int ret = 0;
	size_t read_cnt = 0;

	do {
		if (!(readl_relaxed(base + QUP_I2C_STATUS) & QUP_BUS_ACTIVE))
			goto poll_active_end;
		++read_cnt;
	} while (time_before_eq(jiffies, timeout));

	ret = -EBUSY;

poll_active_end:
	/* second logged value is time-left before timeout or zero if expired */
	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_ACTV_END,
				ret, (ret ? 0 : (timeout - jiffies)), read_cnt);

	return ret;
}

static void i2c_msm_clk_path_vote(struct i2c_msm_ctrl *ctrl)
{
	i2c_msm_clk_path_init(ctrl);

	if (ctrl->rsrcs.clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
					ctrl->rsrcs.clk_path_vote.client_hdl,
					I2C_MSM_CLK_PATH_RESUME_VEC);
}

static void i2c_msm_clk_path_unvote(struct i2c_msm_ctrl *ctrl)
{
	if (ctrl->rsrcs.clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
					ctrl->rsrcs.clk_path_vote.client_hdl,
					I2C_MSM_CLK_PATH_SUSPEND_VEC);
}

static void i2c_msm_clk_path_teardown(struct i2c_msm_ctrl *ctrl)
{
	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
		msm_bus_scale_unregister_client(
					ctrl->rsrcs.clk_path_vote.client_hdl);
		ctrl->rsrcs.clk_path_vote.client_hdl = 0;
	}
}

/*
 * i2c_msm_clk_path_init_structs: internal impl detail of i2c_msm_clk_path_init
 *
 * allocates and initializes the bus scaling vectors.
 */
static int i2c_msm_clk_path_init_structs(struct i2c_msm_ctrl *ctrl)
{
	struct msm_bus_vectors *paths = NULL;
	struct msm_bus_paths *usecases = NULL;

	i2c_msm_dbg(ctrl, MSM_PROF, "initializes path clock voting structs\n");

	paths = kzalloc(sizeof(*paths) * 2, GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	usecases = kzalloc(sizeof(*usecases) * 2, GFP_KERNEL);
	if (!usecases)
		goto path_init_err;

	ctrl->rsrcs.clk_path_vote.pdata = kzalloc(
				sizeof(*ctrl->rsrcs.clk_path_vote.pdata),
				GFP_KERNEL);
	if (!ctrl->rsrcs.clk_path_vote.pdata)
		goto path_init_err;

	paths[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = 0,
		.ib  = 0,
	};

	paths[I2C_MSM_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) {
		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab  = I2C_MSM_CLK_PATH_AVRG_BW(ctrl),
		.ib  = I2C_MSM_CLK_PATH_BRST_BW(ctrl),
	};

	usecases[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors   = &paths[I2C_MSM_CLK_PATH_SUSPEND_VEC],
	};

	usecases[I2C_MSM_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors   = &paths[I2C_MSM_CLK_PATH_RESUME_VEC],
	};

	*ctrl->rsrcs.clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
		.usecase      = usecases,
		.num_usecases = 2,
		.name         = dev_name(ctrl->dev),
	};

	return 0;

path_init_err:
	kfree(paths);
	kfree(usecases);
	kfree(ctrl->rsrcs.clk_path_vote.pdata);
	ctrl->rsrcs.clk_path_vote.pdata = NULL;
	return -ENOMEM;
}

/*
 * i2c_msm_clk_path_postponed_register: reg with bus-scaling after it is probed
 *
 * @return zero on success
 *
 * Workaround: the i2c driver may be probed before the bus scaling driver.
 * Calling msm_bus_scale_register_client() will fail if the bus scaling driver
 * is not ready yet. Thus, this function should be called not from probe but
 * from a later context. Also, this function may be called more than once
 * before registration succeeds. In that case only one error message will be
 * logged. At boot time all clocks are on, so earlier i2c transactions should
 * succeed.
 */
static int i2c_msm_clk_path_postponed_register(struct i2c_msm_ctrl *ctrl)
{
	ctrl->rsrcs.clk_path_vote.client_hdl =
		msm_bus_scale_register_client(ctrl->rsrcs.clk_path_vote.pdata);

	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
		if (ctrl->rsrcs.clk_path_vote.reg_err) {
			/* log a success message if an error msg was logged */
			ctrl->rsrcs.clk_path_vote.reg_err = false;
			dev_err(ctrl->dev,
				"msm_bus_scale_register_client(mstr-id:%d):0x%x (ok)\n",
				ctrl->rsrcs.clk_path_vote.mstr_id,
				ctrl->rsrcs.clk_path_vote.client_hdl);
		}
	} else {
		/* guard to log only one error on multiple failures */
		if (!ctrl->rsrcs.clk_path_vote.reg_err) {
			ctrl->rsrcs.clk_path_vote.reg_err = true;

			dev_info(ctrl->dev,
				"msm_bus_scale_register_client(mstr-id:%d):0 (not a problem)\n",
				ctrl->rsrcs.clk_path_vote.mstr_id);
		}
	}

	return ctrl->rsrcs.clk_path_vote.client_hdl ? 0 : -EAGAIN;
}

static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl)
{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
	if (!ctrl->rsrcs.clk_path_vote.mstr_id ||
				ctrl->rsrcs.clk_path_vote.client_hdl)
		return;

	/* if it failed once, do not try again */
	if (!ctrl->rsrcs.clk_path_vote.pdata &&
					i2c_msm_clk_path_init_structs(ctrl)) {
		ctrl->rsrcs.clk_path_vote.mstr_id = 0;
		return;
	}

	/* on failure try again later */
	if (i2c_msm_clk_path_postponed_register(ctrl))
		return;
}

/*
 * i2c_msm_qup_isr: QUP interrupt service routine
 */
static irqreturn_t i2c_msm_qup_isr(int irq, void *devid)
{
	struct i2c_msm_ctrl *ctrl = devid;
	void __iomem *base = ctrl->rsrcs.base;
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
	u32 err_flags = 0;
	u32 clr_flds = 0;
	bool log_event = false;
	bool signal_complete = false;
	bool need_wmb = false;

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_IRQ_BGN, irq, 0, 0);

	if (!atomic_read(&ctrl->xfer.is_active)) {
		dev_info(ctrl->dev, "irq:%d when no active transfer\n", irq);
		return IRQ_HANDLED;
	}

	ctrl->i2c_sts_reg = readl_relaxed(base + QUP_I2C_STATUS);
	err_flags = readl_relaxed(base + QUP_ERROR_FLAGS);
	ctrl->qup_op_reg = readl_relaxed(base + QUP_OPERATIONAL);

	if (ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK) {
		signal_complete = true;
		log_event = true;
		/*
		 * If there is more than one error here, the last one sticks.
		 * The order in which the errors are set here matters.
		 */
		if (ctrl->i2c_sts_reg & QUP_ARB_LOST)
			ctrl->xfer.err = I2C_MSM_ERR_ARB_LOST;

		if (ctrl->i2c_sts_reg & QUP_BUS_ERROR)
			ctrl->xfer.err = I2C_MSM_ERR_BUS_ERR;

		if (ctrl->i2c_sts_reg & QUP_PACKET_NACKED)
			ctrl->xfer.err = I2C_MSM_ERR_NACK;
	}

	/* check for FIFO over/under run errors */
	if (err_flags & QUP_ERR_FLGS_MASK)
		ctrl->xfer.err = I2C_MSM_ERR_OVR_UNDR_RUN;

	/* Dump the register values before resetting the core */
	if (ctrl->xfer.err && ctrl->dbgfs.dbg_lvl >= MSM_DBG)
		i2c_msm_dbg_qup_reg_dump(ctrl);

	/* clear interrupt fields */
	clr_flds = ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK;
	if (clr_flds) {
		writel_relaxed(clr_flds, base + QUP_I2C_STATUS);
		need_wmb = true;
	}

	clr_flds = err_flags & QUP_ERR_FLGS_MASK;
	if (clr_flds) {
		writel_relaxed(clr_flds, base + QUP_ERROR_FLAGS);
		need_wmb = true;
	}

	clr_flds = ctrl->qup_op_reg &
			(QUP_OUTPUT_SERVICE_FLAG |
			QUP_INPUT_SERVICE_FLAG);
	if (clr_flds) {
		writel_relaxed(clr_flds, base + QUP_OPERATIONAL);
		need_wmb = true;
	}

	if (need_wmb)
		/*
		 * flush writes that clear the interrupt flags before changing
		 * state to reset.
		 */
		wmb();

	/* Reset and bail out on error */
	if (ctrl->xfer.err) {
		/* Flush the tags in case of an error in DMA mode */
		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA) {
			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
								+ QUP_STATE);
			/*
			 * Ensure that QUP_I2C_FLUSH is written before
			 * the state reset
			 */
			wmb();
		}

		/* HW workaround: when the interrupt is level triggered, more
		 * than one interrupt may fire in error cases. Thus we
		 * change the QUP core state to Reset immediately in the
		 * ISR to ward off the next interrupt.
		 */
		writel_relaxed(QUP_STATE_RESET, ctrl->rsrcs.base + QUP_STATE);

		signal_complete = true;
		log_event = true;
		goto isr_end;
	}

	/* handle data completion */
	if (xfer->mode_id == I2C_MSM_XFER_MODE_BLOCK) {
		/* block ready for writing */
		if (ctrl->qup_op_reg & QUP_OUTPUT_SERVICE_FLAG) {
			log_event = true;
			if (ctrl->qup_op_reg & QUP_OUT_BLOCK_WRITE_REQ)
				complete(&blk->wait_tx_blk);

			if ((ctrl->qup_op_reg & blk->complete_mask)
					== blk->complete_mask) {
				log_event = true;
				signal_complete = true;
			}
		}
		/* block ready for reading */
		if (ctrl->qup_op_reg & QUP_INPUT_SERVICE_FLAG) {
			log_event = true;
			complete(&blk->wait_rx_blk);
		}
	} else {
		/* for FIFO/DMA mode */
		if (ctrl->qup_op_reg & QUP_MAX_INPUT_DONE_FLAG) {
			log_event = true;
			/*
			 * If the last transaction is an input then the entire
			 * transfer is done
			 */
			if (ctrl->xfer.last_is_rx)
				signal_complete = true;
		}
		/*
		 * Ideally, we would like to check QUP_MAX_OUTPUT_DONE_FLAG.
		 * However, QUP_MAX_OUTPUT_DONE_FLAG lags behind
		 * QUP_OUTPUT_SERVICE_FLAG. The only reason for
		 * QUP_OUTPUT_SERVICE_FLAG to be set in FIFO mode is the
		 * QUP_MAX_OUTPUT_DONE_FLAG condition. The code here checks
		 * QUP_OUTPUT_SERVICE_FLAG and assumes that it implies
		 * QUP_MAX_OUTPUT_DONE_FLAG.
		 */
		if (ctrl->qup_op_reg & (QUP_OUTPUT_SERVICE_FLAG |
						QUP_MAX_OUTPUT_DONE_FLAG)) {
			log_event = true;
			/*
			 * If the last transaction is an output then the
			 * entire transfer is done
			 */
			if (!ctrl->xfer.last_is_rx)
				signal_complete = true;
		}
	}

isr_end:
	if (log_event || (ctrl->dbgfs.dbg_lvl >= MSM_DBG))
		i2c_msm_prof_evnt_add(ctrl, MSM_PROF,
					I2C_MSM_IRQ_END,
					ctrl->i2c_sts_reg, ctrl->qup_op_reg,
					err_flags);

	if (signal_complete)
		complete(&ctrl->xfer.complete);

	return IRQ_HANDLED;
}
1877
1878static void i2x_msm_blk_free_cache(struct i2c_msm_ctrl *ctrl)
1879{
1880 kfree(ctrl->xfer.blk.tx_cache);
1881 kfree(ctrl->xfer.blk.rx_cache);
1882}
1883
1884static void i2c_msm_qup_init(struct i2c_msm_ctrl *ctrl)
1885{
1886 u32 state;
1887 void __iomem *base = ctrl->rsrcs.base;
1888
1889 i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_PROF_RESET, 0, 0, 0);
1890
1891 i2c_msm_qup_sw_reset(ctrl);
1892 i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
1893
1894 writel_relaxed(QUP_N_VAL | QUP_MINI_CORE_I2C_VAL, base + QUP_CONFIG);
1895
1896 writel_relaxed(QUP_OUTPUT_OVER_RUN_ERR_EN | QUP_INPUT_UNDER_RUN_ERR_EN
1897 | QUP_OUTPUT_UNDER_RUN_ERR_EN | QUP_INPUT_OVER_RUN_ERR_EN,
1898 base + QUP_ERROR_FLAGS_EN);
1899
1900 writel_relaxed(QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK,
1901 base + QUP_OPERATIONAL_MASK);
1902
1903 writel_relaxed(QUP_EN_VERSION_TWO_TAG, base + QUP_I2C_MASTER_CONFIG);
1904
1905 i2c_msm_qup_fifo_calc_size(ctrl);
	/*
	 * Ensure that the QUP configuration is written and that the FIFO
	 * size is read before leaving this function
	 */
	mb();

	state = readl_relaxed(base + QUP_STATE);

	if (!(state & QUP_I2C_MAST_GEN))
		dev_err(ctrl->dev,
			"error on verifying HW support (I2C_MAST_GEN=0)\n");
}

static void qup_i2c_recover_bit_bang(struct i2c_msm_ctrl *ctrl)
{
	int i, ret;
	int gpio_clk;
	int gpio_dat;
	bool gpio_clk_status = false;
	u32 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
	struct pinctrl_state *bitbang;

	dev_info(ctrl->dev, "Executing bus recovery procedure (9 clk pulse)\n");
	disable_irq(ctrl->rsrcs.irq);
	if (!(status & (I2C_STATUS_BUS_ACTIVE)) ||
	    (status & (I2C_STATUS_BUS_MASTER))) {
		dev_warn(ctrl->dev, "unexpected i2c recovery call:0x%x\n",
			status);
		goto recovery_exit;
	}

	gpio_clk = of_get_named_gpio(ctrl->adapter.dev.of_node, "qcom,i2c-clk",
				     0);
	gpio_dat = of_get_named_gpio(ctrl->adapter.dev.of_node, "qcom,i2c-dat",
				     0);

	if (gpio_clk < 0 || gpio_dat < 0) {
		dev_warn(ctrl->dev, "SW bitbang err: i2c gpios not known\n");
		goto recovery_exit;
	}

	bitbang = i2c_msm_rsrcs_gpio_get_state(ctrl, "i2c_bitbang");
	if (bitbang)
		ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, bitbang);
	if (!bitbang || ret) {
		dev_err(ctrl->dev, "GPIO pins have no bitbang setting\n");
		goto recovery_exit;
	}
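	/*
	 * Bit-bang the recovery: each iteration pulses SCL low and releases
	 * it, then releases SDA, stopping early once SDA reads high while
	 * SCL was last seen high (i.e. the slave let go of the bus).
	 */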
	for (i = 0; i < 10; i++) {
		if (gpio_get_value(gpio_dat) && gpio_clk_status)
			break;
		gpio_direction_output(gpio_clk, 0);
		udelay(5);
		gpio_direction_output(gpio_dat, 0);
		udelay(5);
		gpio_direction_input(gpio_clk);
		udelay(5);
		if (!gpio_get_value(gpio_clk))
			udelay(20);
		if (!gpio_get_value(gpio_clk))
			usleep_range(10000, 10001);
		gpio_clk_status = gpio_get_value(gpio_clk);
		gpio_direction_input(gpio_dat);
		udelay(5);
	}

	i2c_msm_pm_pinctrl_state(ctrl, true);
	udelay(10);

	status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
	if (!(status & I2C_STATUS_BUS_ACTIVE)) {
		dev_info(ctrl->dev,
			"Bus busy cleared after %d clock cycles, status %x\n",
			i, status);
		goto recovery_exit;
	}

	dev_warn(ctrl->dev, "Bus still busy, status %x\n", status);

recovery_exit:
	enable_irq(ctrl->rsrcs.irq);
}

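/*
 * i2c_msm_qup_post_xfer: transfer teardown and error translation
 *
 * Waits for the bus to go idle, attempts bit-bang recovery when a slave
 * holds the bus after an error, and maps the internal error codes to errnos.
 */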
static int i2c_msm_qup_post_xfer(struct i2c_msm_ctrl *ctrl, int err)
{
	/* poll until bus is released */
	if (i2c_msm_qup_poll_bus_active_unset(ctrl)) {
		if ((ctrl->xfer.err == I2C_MSM_ERR_ARB_LOST) ||
		    (ctrl->xfer.err == I2C_MSM_ERR_BUS_ERR) ||
		    (ctrl->xfer.err == I2C_MSM_ERR_TIMEOUT)) {
			if (i2c_msm_qup_slv_holds_bus(ctrl))
				qup_i2c_recover_bit_bang(ctrl);

			/* do not generalize the error to EIO if it is already set */
			if (!err)
				err = -EIO;
		}
	}

	/*
	 * Disable the IRQ before changing to the reset state, to avoid
	 * spurious interrupts.
	 */
	disable_irq(ctrl->rsrcs.irq);

	/*
	 * Flush the DMA data and reset the QUP core on a timeout error.
	 * Other error cases are handled by the ISR.
	 */
	if (ctrl->xfer.err & I2C_MSM_ERR_TIMEOUT) {
		/* flush the DMA registers */
		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
								+ QUP_STATE);

		/* reset the qup core */
		i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
		err = -ETIMEDOUT;
	} else if (ctrl->xfer.err == I2C_MSM_ERR_NACK) {
		err = -ENOTCONN;
	}

	return err;
}

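/*
 * i2c_msm_qup_choose_mode: select FIFO, BLOCK, or DMA for this transfer
 *
 * A debugfs override takes precedence. Otherwise FIFO mode is used when
 * both directions (payload plus tag overhead) fit in the HW FIFOs; larger
 * transfers use DMA, falling back to BLOCK mode when DMA is disabled via
 * device tree.
 */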
static enum i2c_msm_xfer_mode_id
i2c_msm_qup_choose_mode(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	size_t rx_cnt_sum = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
	size_t tx_cnt_sum = xfer->tx_cnt + xfer->tx_ovrhd_cnt;

	if (ctrl->dbgfs.force_xfer_mode != I2C_MSM_XFER_MODE_NONE)
		return ctrl->dbgfs.force_xfer_mode;

	if ((rx_cnt_sum < fifo->input_fifo_sz) &&
	    (tx_cnt_sum < fifo->output_fifo_sz))
		return I2C_MSM_XFER_MODE_FIFO;

	if (ctrl->rsrcs.disable_dma)
		return I2C_MSM_XFER_MODE_BLOCK;

	return I2C_MSM_XFER_MODE_DMA;
}

/*
 * i2c_msm_xfer_calc_timeout: calc maximum xfer time in jiffies
 *
 * Basically timeout = (bit_count / frequency) * safety_coefficient.
 * The safety-coefficient also accounts for debugging delay (mostly from
 * printk() calls).
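 *
 * Worked example with assumed values: a 100-byte transfer at 100 KHz is
 * 900 bits (9 clocks per byte), i.e. ~9000 usec on the wire; with a
 * combined safety coefficient of 10 that yields ~90 msec, plus
 * I2C_MSM_TIMEOUT_MIN_USEC, converted to jiffies.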
 */
static void i2c_msm_xfer_calc_timeout(struct i2c_msm_ctrl *ctrl)
{
	size_t byte_cnt = ctrl->xfer.rx_cnt + ctrl->xfer.tx_cnt;
	size_t bit_cnt = byte_cnt * 9;
	size_t bit_usec = (bit_cnt * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out;
	size_t logging_ovrhd_coef = ctrl->dbgfs.dbg_lvl + 1;
	size_t safety_coef = I2C_MSM_TIMEOUT_SAFETY_COEF * logging_ovrhd_coef;
	size_t xfer_max_usec = (bit_usec * safety_coef) +
						I2C_MSM_TIMEOUT_MIN_USEC;

	ctrl->xfer.timeout = usecs_to_jiffies(xfer_max_usec);
}

static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
						struct completion *complete)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	long time_left;
	int ret = 0;

	time_left = wait_for_completion_timeout(complete, xfer->timeout);
	if (!time_left) {
		xfer->err = I2C_MSM_ERR_TIMEOUT;
		i2c_msm_dbg_dump_diag(ctrl, false, 0, 0);
		ret = -EIO;
		i2c_msm_prof_evnt_add(ctrl, MSM_ERR, I2C_MSM_COMPLT_FL,
					xfer->timeout, time_left, 0);
	} else {
		/* return an error if one was detected by the ISR */
		if (ctrl->xfer.err || (ctrl->dbgfs.dbg_lvl >= MSM_DBG)) {
			i2c_msm_dbg_dump_diag(ctrl, true,
					ctrl->i2c_sts_reg, ctrl->qup_op_reg);
			ret = -(xfer->err);
		}
		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_COMPLT_OK,
					xfer->timeout, time_left, 0);
	}

	return ret;
}

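/*
 * i2c_msm_slv_rd_wr_addr: build the 8-bit address byte sent on the wire:
 * the 7-bit slave address shifted left by one, with the R/W bit in bit 0
 * (1 = read).
 */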
static u16 i2c_msm_slv_rd_wr_addr(u16 slv_addr, bool is_rx)
{
	return (slv_addr << 1) | (is_rx ? 0x1 : 0x0);
}

/*
 * @return true when the current transfer's buffer points to the last message
 * of the user's request.
 */
static bool i2c_msm_xfer_msg_is_last(struct i2c_msm_ctrl *ctrl)
{
	return ctrl->xfer.cur_buf.msg_idx >= (ctrl->xfer.msg_cnt - 1);
}

/*
 * @return true when the current transfer's buffer points to the last
 * transferable buffer (size <= QUP_MAX_BUF_SZ) of the last message of the
 * user's request.
 */
static bool i2c_msm_xfer_buf_is_last(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
	struct i2c_msg *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;

	return i2c_msm_xfer_msg_is_last(ctrl) &&
		((cur_buf->byte_idx + QUP_MAX_BUF_SZ) >= cur_msg->len);
}

static void i2c_msm_xfer_create_cur_tag(struct i2c_msm_ctrl *ctrl,
								bool start_req)
{
	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;

	cur_buf->out_tag = i2c_msm_tag_create(start_req, cur_buf->is_last,
					cur_buf->is_rx, cur_buf->len,
					cur_buf->slv_addr);

	cur_buf->in_tag.len = cur_buf->is_rx ? QUP_BUF_OVERHD_BC : 0;
}

/*
 * i2c_msm_xfer_next_buf: supports cases where msg.len > 256 bytes
 *
 * @return true when the next buffer exists, or false when no such buffer
 */
static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
	struct i2c_msg *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
	int bc_rem = cur_msg->len - cur_buf->end_idx;

	if (cur_buf->is_init && cur_buf->end_idx && bc_rem) {
		/* not the first buffer in a message */

		cur_buf->byte_idx = cur_buf->end_idx;
		cur_buf->is_last = i2c_msm_xfer_buf_is_last(ctrl);
		cur_buf->len = min_t(int, bc_rem, QUP_MAX_BUF_SZ);
		cur_buf->end_idx += cur_buf->len;

		/* no Start is required if it is not the first buffer in msg */
		i2c_msm_xfer_create_cur_tag(ctrl, false);
	} else {
		/* first buffer in a new message */
		if (cur_buf->is_init) {
			if (i2c_msm_xfer_msg_is_last(ctrl))
				return false;

			++cur_buf->msg_idx;
			++cur_msg;
		} else {
			cur_buf->is_init = true;
		}
		cur_buf->byte_idx = 0;
		cur_buf->is_last = i2c_msm_xfer_buf_is_last(ctrl);
		cur_buf->len = min_t(int, cur_msg->len, QUP_MAX_BUF_SZ);
		cur_buf->is_rx = (cur_msg->flags & I2C_M_RD);
		cur_buf->end_idx = cur_buf->len;
		cur_buf->slv_addr = i2c_msm_slv_rd_wr_addr(cur_msg->addr,
								cur_buf->is_rx);
		i2c_msm_xfer_create_cur_tag(ctrl, true);
	}
	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_NEXT_BUF, cur_buf->msg_idx,
							cur_buf->byte_idx, 0);
	return true;
}

static void i2c_msm_pm_clk_unprepare(struct i2c_msm_ctrl *ctrl)
{
	clk_unprepare(ctrl->rsrcs.core_clk);
	clk_unprepare(ctrl->rsrcs.iface_clk);
}

static int i2c_msm_pm_clk_prepare(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	ret = clk_prepare(ctrl->rsrcs.iface_clk);
	if (ret) {
		dev_err(ctrl->dev,
			"error on clk_prepare(iface_clk):%d\n", ret);
		return ret;
	}

	ret = clk_prepare(ctrl->rsrcs.core_clk);
	if (ret) {
		clk_unprepare(ctrl->rsrcs.iface_clk);
		dev_err(ctrl->dev,
			"error clk_prepare(core_clk):%d\n", ret);
	}
	return ret;
}

static void i2c_msm_pm_clk_disable(struct i2c_msm_ctrl *ctrl)
{
	clk_disable(ctrl->rsrcs.core_clk);
	clk_disable(ctrl->rsrcs.iface_clk);
}

static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	ret = clk_enable(ctrl->rsrcs.iface_clk);
	if (ret) {
		dev_err(ctrl->dev,
			"error on clk_enable(iface_clk):%d\n", ret);
		i2c_msm_pm_clk_unprepare(ctrl);
		return ret;
	}
	ret = clk_enable(ctrl->rsrcs.core_clk);
	if (ret) {
		clk_disable(ctrl->rsrcs.iface_clk);
		i2c_msm_pm_clk_unprepare(ctrl);
		dev_err(ctrl->dev,
			"error clk_enable(core_clk):%d\n", ret);
	}
	return ret;
}

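/*
 * i2c_msm_pm_xfer_start: power up the controller for a transfer
 *
 * Takes the transfer mutex, moves the pins to their active state, wakes
 * the device via runtime PM (resuming manually when the callback was not
 * invoked), enables the clocks, reinitializes the QUP core, and enables
 * the IRQ.
 */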
static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	mutex_lock(&ctrl->xfer.mtx);

	i2c_msm_pm_pinctrl_state(ctrl, true);
	pm_runtime_get_sync(ctrl->dev);
	/*
	 * if the runtime PM callback was not invoked (when both runtime-pm
	 * and system-pm are in transition concurrently)
	 */
	if (ctrl->pwr_state != I2C_MSM_PM_RT_ACTIVE) {
		dev_info(ctrl->dev, "Runtime PM-callback was not invoked\n");
		i2c_msm_pm_resume(ctrl->dev);
	}

	ret = i2c_msm_pm_clk_enable(ctrl);
	if (ret) {
		mutex_unlock(&ctrl->xfer.mtx);
		return ret;
	}
	i2c_msm_qup_init(ctrl);

	/* set xfer to the active state (effectively enabling our ISR) */
	atomic_set(&ctrl->xfer.is_active, 1);

	enable_irq(ctrl->rsrcs.irq);
	return 0;
}

static void i2c_msm_pm_xfer_end(struct i2c_msm_ctrl *ctrl)
{
	atomic_set(&ctrl->xfer.is_active, 0);

	/*
	 * DMA resources are freed due to the multi-EE use case.
	 * Other EEs can potentially use the DMA
	 * resources within the same runtime PM vote.
	 */
	if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
		i2c_msm_dma_free_channels(ctrl);

	i2c_msm_pm_clk_disable(ctrl);

	if (!pm_runtime_enabled(ctrl->dev))
		i2c_msm_pm_suspend(ctrl->dev);

	pm_runtime_mark_last_busy(ctrl->dev);
	pm_runtime_put_autosuspend(ctrl->dev);
	i2c_msm_pm_pinctrl_state(ctrl, false);
	mutex_unlock(&ctrl->xfer.mtx);
}

/*
 * i2c_msm_xfer_scan: initial input scan
 *
 * Walks all the transfer's buffers to tally payload and tag-overhead byte
 * counts (used for mode selection and timeout calculation) and to record
 * whether the last transaction is a read.
 */
static void i2c_msm_xfer_scan(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	struct i2c_msm_xfer_buf *cur_buf = &xfer->cur_buf;

	while (i2c_msm_xfer_next_buf(ctrl)) {
		if (cur_buf->is_rx)
			xfer->rx_cnt += cur_buf->len;
		else
			xfer->tx_cnt += cur_buf->len;

		xfer->rx_ovrhd_cnt += cur_buf->in_tag.len;
		xfer->tx_ovrhd_cnt += cur_buf->out_tag.len;

		if (i2c_msm_xfer_msg_is_last(ctrl))
			xfer->last_is_rx = cur_buf->is_rx;
	}
	xfer->cur_buf = (struct i2c_msm_xfer_buf){0};
}

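/*
 * i2c_msm_frmwrk_xfer: i2c-core master_xfer entry point
 *
 * Validates the request, powers up the controller, scans the messages to
 * size the transfer, picks a transfer mode, runs it, and translates the
 * result for the i2c framework.
 */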
static int
i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	int ret = 0;
	struct i2c_msm_ctrl *ctrl = i2c_get_adapdata(adap);
	struct i2c_msm_xfer *xfer = &ctrl->xfer;

	if (IS_ERR_OR_NULL(msgs)) {
		dev_err(ctrl->dev, "error on msgs: invalid pointer\n");
		return (msgs) ? PTR_ERR(msgs) : -EINVAL;
	}

	/* if the system is suspended just bail out */
	if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
		dev_err(ctrl->dev,
			"slave:0x%x is calling xfer when the system is suspended\n",
			msgs->addr);
		return -EIO;
	}

	ret = i2c_msm_pm_xfer_start(ctrl);
	if (ret)
		return ret;

	/* init xfer */
	xfer->msgs = msgs;
	xfer->msg_cnt = num;
	xfer->mode_id = I2C_MSM_XFER_MODE_NONE;
	xfer->err = 0;
	xfer->rx_cnt = 0;
	xfer->tx_cnt = 0;
	xfer->rx_ovrhd_cnt = 0;
	xfer->tx_ovrhd_cnt = 0;
	atomic_set(&xfer->event_cnt, 0);
	init_completion(&xfer->complete);
	init_completion(&xfer->rx_complete);

	xfer->cur_buf.is_init = false;
	xfer->cur_buf.msg_idx = 0;

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_BEG, num,
								msgs->addr, 0);

	i2c_msm_xfer_scan(ctrl);
	i2c_msm_xfer_calc_timeout(ctrl);
	xfer->mode_id = i2c_msm_qup_choose_mode(ctrl);

	dev_dbg(ctrl->dev, "xfer() mode:%d msg_cnt:%d rx_cnt:%zu tx_cnt:%zu\n",
		xfer->mode_id, xfer->msg_cnt, xfer->rx_cnt, xfer->tx_cnt);

	switch (xfer->mode_id) {
	case I2C_MSM_XFER_MODE_FIFO:
		ret = i2c_msm_fifo_xfer(ctrl);
		break;
	case I2C_MSM_XFER_MODE_BLOCK:
		ret = i2c_msm_blk_xfer(ctrl);
		break;
	case I2C_MSM_XFER_MODE_DMA:
		ret = i2c_msm_dma_xfer(ctrl);
		break;
	default:
		ret = -EINTR;
	}

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_SCAN_SUM,
		((xfer->rx_cnt & 0xff) | ((xfer->rx_ovrhd_cnt & 0xff) << 16)),
		((xfer->tx_cnt & 0xff) | ((xfer->tx_ovrhd_cnt & 0xff) << 16)),
		((ctrl->xfer.timeout & 0xfff) | ((xfer->mode_id & 0xf) << 24)));

	ret = i2c_msm_qup_post_xfer(ctrl, ret);
	/* on success, return number of messages sent (which is index + 1) */
	if (!ret)
		ret = xfer->cur_buf.msg_idx + 1;

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_END, ret, xfer->err,
						xfer->cur_buf.msg_idx + 1);
	/* process and dump profiling data */
	if (xfer->err || (ctrl->dbgfs.dbg_lvl >= MSM_PROF))
		i2c_msm_prof_evnt_dump(ctrl);

	i2c_msm_pm_xfer_end(ctrl);
	return ret;
}

enum i2c_msm_dt_entry_status {
	DT_REQ,  /* Required: fail if missing */
	DT_SGST, /* Suggested: warn if missing */
	DT_OPT,  /* Optional: don't warn if missing */
};

enum i2c_msm_dt_entry_type {
	DT_U32,
	DT_BOOL,
	DT_ID, /* of_alias_get_id() */
};

struct i2c_msm_dt_to_pdata_map {
	const char *dt_name;
	void *ptr_data;
	enum i2c_msm_dt_entry_status status;
	enum i2c_msm_dt_entry_type type;
	int default_val;
};

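/*
 * i2c_msm_dt_to_pdata_populate: table-driven device-tree parser
 *
 * Iterates over the NULL-terminated map, reading each property by type.
 * On a missing entry the default value is applied; required entries log
 * an error but the walk continues so that all missing entries are
 * reported, and the first such error is returned at the end.
 */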
static int i2c_msm_dt_to_pdata_populate(struct i2c_msm_ctrl *ctrl,
					struct platform_device *pdev,
					struct i2c_msm_dt_to_pdata_map *itr)
{
	int ret, err = 0;
	struct device_node *node = pdev->dev.of_node;

	for (; itr->dt_name; ++itr) {
		switch (itr->type) {
		case DT_U32:
			ret = of_property_read_u32(node, itr->dt_name,
							(u32 *) itr->ptr_data);
			break;
		case DT_BOOL:
			*((bool *) itr->ptr_data) =
				of_property_read_bool(node, itr->dt_name);
			ret = 0;
			break;
		case DT_ID:
			ret = of_alias_get_id(node, itr->dt_name);
			if (ret >= 0) {
				*((int *) itr->ptr_data) = ret;
				ret = 0;
			}
			break;
		default:
			dev_err(ctrl->dev,
				"error: unknown DT entry type %d\n",
				itr->type);
			ret = -EBADE;
		}

		i2c_msm_dbg(ctrl, MSM_PROF, "DT entry ret:%d name:%s val:%d\n",
				ret, itr->dt_name, *((int *)itr->ptr_data));

		if (ret) {
			*((int *)itr->ptr_data) = itr->default_val;

			if (itr->status < DT_OPT) {
				dev_err(ctrl->dev,
					"error missing '%s' DT entry\n",
					itr->dt_name);

				/* cont on err to dump all missing entries */
				if (itr->status == DT_REQ && !err)
					err = ret;
			}
		}
	}

	return err;
}
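/*
 * Example DT node (illustrative only; the 'reg'/'interrupts' values and
 * the exact property set are board specific):
 *
 *	i2c_1: i2c@78b6000 {
 *		compatible = "qcom,i2c-msm-v2";
 *		reg = <0x78b6000 0x600>;
 *		reg-names = "qup_phys_addr";
 *		interrupt-names = "qup_irq";
 *		qcom,clk-freq-out = <400000>;
 *		qcom,clk-freq-in = <19200000>;
 *	};
 */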
/*
 * i2c_msm_rsrcs_process_dt: copy data from DT to platform data
 * @return zero on success or negative error code
 */
static int i2c_msm_rsrcs_process_dt(struct i2c_msm_ctrl *ctrl,
					struct platform_device *pdev)
{
	u32 fs_clk_div, ht_clk_div, noise_rjct_scl, noise_rjct_sda;
	int ret;

	struct i2c_msm_dt_to_pdata_map map[] = {
		{"i2c", &pdev->id, DT_REQ, DT_ID, -1},
		{"qcom,clk-freq-out", &ctrl->rsrcs.clk_freq_out,
							DT_REQ, DT_U32, 0},
		{"qcom,clk-freq-in", &ctrl->rsrcs.clk_freq_in,
							DT_REQ, DT_U32, 0},
		{"qcom,disable-dma", &(ctrl->rsrcs.disable_dma),
							DT_OPT, DT_BOOL, 0},
		{"qcom,master-id", &(ctrl->rsrcs.clk_path_vote.mstr_id),
							DT_SGST, DT_U32, 0},
		{"qcom,noise-rjct-scl", &noise_rjct_scl,
							DT_OPT, DT_U32, 0},
		{"qcom,noise-rjct-sda", &noise_rjct_sda,
							DT_OPT, DT_U32, 0},
		{"qcom,high-time-clk-div", &ht_clk_div,
							DT_OPT, DT_U32, 0},
		{"qcom,fs-clk-div", &fs_clk_div,
							DT_OPT, DT_U32, 0},
		{NULL, NULL, 0, 0, 0},
	};

	ret = i2c_msm_dt_to_pdata_populate(ctrl, pdev, map);
	if (ret)
		return ret;

	/* set divider and noise reject values */
	return i2c_msm_set_mstr_clk_ctl(ctrl, fs_clk_div, ht_clk_div,
					noise_rjct_scl, noise_rjct_sda);
}

/*
 * i2c_msm_rsrcs_mem_init: reads the memory resource from pdata, requests
 * the region, and ioremaps it
 * @return zero on success or negative error code
 */
static int i2c_msm_rsrcs_mem_init(struct platform_device *pdev,
						struct i2c_msm_ctrl *ctrl)
{
	struct resource *mem_region;

	ctrl->rsrcs.mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
							"qup_phys_addr");
	if (!ctrl->rsrcs.mem) {
		dev_err(ctrl->dev, "error missing 'qup_phys_addr' resource\n");
		return -ENODEV;
	}

	mem_region = request_mem_region(ctrl->rsrcs.mem->start,
					resource_size(ctrl->rsrcs.mem),
					pdev->name);
	if (!mem_region) {
		dev_err(ctrl->dev,
			"QUP physical memory region already claimed\n");
		return -EBUSY;
	}

	ctrl->rsrcs.base = devm_ioremap(ctrl->dev, ctrl->rsrcs.mem->start,
					resource_size(ctrl->rsrcs.mem));
	if (!ctrl->rsrcs.base) {
		dev_err(ctrl->dev,
			"error failed ioremap(base:0x%llx size:0x%llx)\n",
			(u64) ctrl->rsrcs.mem->start,
			(u64) resource_size(ctrl->rsrcs.mem));
		release_mem_region(ctrl->rsrcs.mem->start,
					resource_size(ctrl->rsrcs.mem));
		return -ENOMEM;
	}

	return 0;
}

static void i2c_msm_rsrcs_mem_teardown(struct i2c_msm_ctrl *ctrl)
{
	release_mem_region(ctrl->rsrcs.mem->start,
					resource_size(ctrl->rsrcs.mem));
}

/*
 * i2c_msm_rsrcs_irq_init: finds irq num in pdata and requests it
 * @return zero on success or negative error code
 */
static int i2c_msm_rsrcs_irq_init(struct platform_device *pdev,
						struct i2c_msm_ctrl *ctrl)
{
	int ret, irq;

	irq = platform_get_irq_byname(pdev, "qup_irq");
	if (irq < 0) {
		dev_err(ctrl->dev, "error reading irq resource\n");
		return irq;
	}

	ret = request_irq(irq, i2c_msm_qup_isr, IRQF_TRIGGER_HIGH,
						"i2c-msm-v2-irq", ctrl);
	if (ret) {
		dev_err(ctrl->dev, "error request_irq(irq_num:%d) ret:%d\n",
								irq, ret);
		return ret;
	}

	disable_irq(irq);
	ctrl->rsrcs.irq = irq;
	return 0;
}

static void i2c_msm_rsrcs_irq_teardown(struct i2c_msm_ctrl *ctrl)
{
	free_irq(ctrl->rsrcs.irq, ctrl);
}

static struct pinctrl_state *
i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl, const char *name)
{
	struct pinctrl_state *pin_state
			= pinctrl_lookup_state(ctrl->rsrcs.pinctrl, name);

	if (IS_ERR_OR_NULL(pin_state))
		dev_info(ctrl->dev, "note pinctrl_lookup_state(%s) err:%ld\n",
						name, PTR_ERR(pin_state));
	return pin_state;
}

/*
 * i2c_msm_rsrcs_gpio_pinctrl_init: initializes the pinctrl for i2c gpios
 *
 * @pre platform data must be initialized
 */
static int i2c_msm_rsrcs_gpio_pinctrl_init(struct i2c_msm_ctrl *ctrl)
{
	ctrl->rsrcs.pinctrl = devm_pinctrl_get(ctrl->dev);
	if (IS_ERR_OR_NULL(ctrl->rsrcs.pinctrl)) {
		dev_err(ctrl->dev, "error devm_pinctrl_get() failed err:%ld\n",
				PTR_ERR(ctrl->rsrcs.pinctrl));
		return PTR_ERR(ctrl->rsrcs.pinctrl);
	}

	ctrl->rsrcs.gpio_state_active =
		i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_ACTIVE);

	ctrl->rsrcs.gpio_state_suspend =
		i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_SUSPEND);

	return 0;
}

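/*
 * i2c_msm_pm_pinctrl_state: move the i2c pins between their "active" and
 * "suspend" pinctrl states; logs an error when the requested state was
 * not configured in the device tree.
 */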
static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
				bool runtime_active)
{
	struct pinctrl_state *pins_state;
	const char *pins_state_name;

	if (runtime_active) {
		pins_state = ctrl->rsrcs.gpio_state_active;
		pins_state_name = I2C_MSM_PINCTRL_ACTIVE;
	} else {
		pins_state = ctrl->rsrcs.gpio_state_suspend;
		pins_state_name = I2C_MSM_PINCTRL_SUSPEND;
	}

	if (!IS_ERR_OR_NULL(pins_state)) {
		int ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, pins_state);

		if (ret)
			dev_err(ctrl->dev,
				"error pinctrl_select_state(%s) err:%d\n",
				pins_state_name, ret);
	} else {
		dev_err(ctrl->dev,
			"error pinctrl state-name:'%s' is not configured\n",
			pins_state_name);
	}
}

/*
 * i2c_msm_rsrcs_clk_init: get clocks and set rate
 *
 * @return zero on success or negative error code
 */
static int i2c_msm_rsrcs_clk_init(struct i2c_msm_ctrl *ctrl)
{
	int ret = 0;

	if ((ctrl->rsrcs.clk_freq_out <= 0) ||
	    (ctrl->rsrcs.clk_freq_out > I2C_MSM_CLK_FAST_PLUS_FREQ)) {
		dev_err(ctrl->dev,
			"error clock frequency %d KHz is not supported\n",
			(ctrl->rsrcs.clk_freq_out / 1000));
		return -EIO;
	}

	ctrl->rsrcs.core_clk = clk_get(ctrl->dev, "core_clk");
	if (IS_ERR(ctrl->rsrcs.core_clk)) {
		ret = PTR_ERR(ctrl->rsrcs.core_clk);
		dev_err(ctrl->dev, "error on clk_get(core_clk):%d\n", ret);
		return ret;
	}

	ret = clk_set_rate(ctrl->rsrcs.core_clk, ctrl->rsrcs.clk_freq_in);
	if (ret) {
		dev_err(ctrl->dev, "error on clk_set_rate(core_clk, %d KHz):%d\n",
			(ctrl->rsrcs.clk_freq_in / 1000), ret);
		goto err_set_rate;
	}

	ctrl->rsrcs.iface_clk = clk_get(ctrl->dev, "iface_clk");
	if (IS_ERR(ctrl->rsrcs.iface_clk)) {
		ret = PTR_ERR(ctrl->rsrcs.iface_clk);
		dev_err(ctrl->dev, "error on clk_get(iface_clk):%d\n", ret);
		goto err_set_rate;
	}

	return 0;

err_set_rate:
	clk_put(ctrl->rsrcs.core_clk);
	ctrl->rsrcs.core_clk = NULL;
	return ret;
}

static void i2c_msm_rsrcs_clk_teardown(struct i2c_msm_ctrl *ctrl)
{
	clk_put(ctrl->rsrcs.core_clk);
	clk_put(ctrl->rsrcs.iface_clk);
	i2c_msm_clk_path_teardown(ctrl);
}

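/*
 * i2c_msm_pm_suspend: common suspend path for both runtime and system PM
 *
 * Unprepares the clocks and removes the bus-bandwidth vote; see the
 * comment below on why a system-suspended state is never downgraded to
 * runtime-suspended.
 */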
static void i2c_msm_pm_suspend(struct device *dev)
{
	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->pwr_state == I2C_MSM_PM_RT_SUSPENDED) {
		dev_err(ctrl->dev, "attempt to suspend when suspended\n");
		return;
	}
	i2c_msm_dbg(ctrl, MSM_DBG, "suspending...\n");
	i2c_msm_pm_clk_unprepare(ctrl);
	i2c_msm_clk_path_unvote(ctrl);

	/*
	 * We implement system and runtime suspend the same way. However,
	 * it is important for us to distinguish between them when servicing
	 * transfer requests. If we get a transfer request while in runtime
	 * suspend we want to simply wake up and service that request. But if
	 * we get a transfer request while the system is suspending we want to
	 * bail out on that request. This is why, if we marked that we are in
	 * system suspend, we do not want to override that state with runtime
	 * suspend.
	 */
	if (ctrl->pwr_state != I2C_MSM_PM_SYS_SUSPENDED)
		ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
}

static int i2c_msm_pm_resume(struct device *dev)
{
	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->pwr_state == I2C_MSM_PM_RT_ACTIVE)
		return 0;

	i2c_msm_dbg(ctrl, MSM_DBG, "resuming...\n");

	i2c_msm_clk_path_vote(ctrl);
	i2c_msm_pm_clk_prepare(ctrl);
	ctrl->pwr_state = I2C_MSM_PM_RT_ACTIVE;
	return 0;
}

#ifdef CONFIG_PM
/*
 * i2c_msm_pm_sys_suspend_noirq: system power management callback
 */
static int i2c_msm_pm_sys_suspend_noirq(struct device *dev)
{
	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
	enum i2c_msm_power_state prev_state = ctrl->pwr_state;

	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: suspending...\n");

	/* acquire the mutex to ensure that the current transaction is over */
	mutex_lock(&ctrl->xfer.mtx);
	ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
	mutex_unlock(&ctrl->xfer.mtx);

	if (prev_state == I2C_MSM_PM_RT_ACTIVE) {
		i2c_msm_pm_suspend(dev);
		/*
		 * Synchronize runtime-pm and system-pm states:
		 * at this point we are already suspended. However, the
		 * runtime-PM framework still thinks that we are active.
		 * The three calls below let the runtime-PM know that we are
		 * suspended already, without re-invoking the suspend callback.
		 */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_enable(dev);
	}

	return 0;
}

/*
 * i2c_msm_pm_sys_resume_noirq: system power management callback
 * shifts the controller's power state from system suspend to runtime suspend
 */
static int i2c_msm_pm_sys_resume_noirq(struct device *dev)
{
	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);

	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: resuming...\n");
	mutex_lock(&ctrl->xfer.mtx);
	ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
	mutex_unlock(&ctrl->xfer.mtx);
	return 0;
}
#endif

#ifdef CONFIG_PM
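/*
 * i2c_msm_pm_rt_init: register with the runtime PM framework: start in the
 * suspended state and enable autosuspend with a (MSEC_PER_SEC >> 2) = 250 ms
 * idle delay.
 */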
static void i2c_msm_pm_rt_init(struct device *dev)
{
	pm_runtime_set_suspended(dev);
	pm_runtime_set_autosuspend_delay(dev, (MSEC_PER_SEC >> 2));
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

/*
 * i2c_msm_pm_rt_suspend: runtime power management callback
 */
static int i2c_msm_pm_rt_suspend(struct device *dev)
{
	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);

	i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: suspending...\n");
	i2c_msm_pm_suspend(dev);
	return 0;
}

/*
 * i2c_msm_pm_rt_resume: runtime power management callback
 */
static int i2c_msm_pm_rt_resume(struct device *dev)
{
	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);

	i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: resuming...\n");
	return i2c_msm_pm_resume(dev);
}

#else
static void i2c_msm_pm_rt_init(struct device *dev) {}
#define i2c_msm_pm_rt_suspend NULL
#define i2c_msm_pm_rt_resume NULL
#endif

static const struct dev_pm_ops i2c_msm_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend_noirq = i2c_msm_pm_sys_suspend_noirq,
	.resume_noirq = i2c_msm_pm_sys_resume_noirq,
#endif
	SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend,
			   i2c_msm_pm_rt_resume,
			   NULL)
};

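/*
 * Advertise plain I2C plus SMBus emulation, minus SMBus Quick (presumably
 * because zero-length transactions are not supported by this controller).
 */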
static u32 i2c_msm_frmwrk_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
}

static const struct i2c_algorithm i2c_msm_frmwrk_algrtm = {
	.master_xfer = i2c_msm_frmwrk_xfer,
	.functionality = i2c_msm_frmwrk_func,
};

static const char * const i2c_msm_adapter_name = "MSM-I2C-v2-adapter";

static int i2c_msm_frmwrk_reg(struct platform_device *pdev,
						struct i2c_msm_ctrl *ctrl)
{
	int ret;

	i2c_set_adapdata(&ctrl->adapter, ctrl);
	ctrl->adapter.algo = &i2c_msm_frmwrk_algrtm;
	strlcpy(ctrl->adapter.name, i2c_msm_adapter_name,
						sizeof(ctrl->adapter.name));

	ctrl->adapter.nr = pdev->id;
	ctrl->adapter.dev.parent = &pdev->dev;
	ctrl->adapter.dev.of_node = pdev->dev.of_node;
	ret = i2c_add_numbered_adapter(&ctrl->adapter);
	if (ret) {
		dev_err(ctrl->dev, "error i2c_add_numbered_adapter failed\n");
		return ret;
	}

	return ret;
}

static void i2c_msm_frmwrk_unreg(struct i2c_msm_ctrl *ctrl)
{
	i2c_del_adapter(&ctrl->adapter);
}

static int i2c_msm_probe(struct platform_device *pdev)
{
	struct i2c_msm_ctrl *ctrl;
	int ret = 0;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;
	ctrl->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctrl);
	ctrl->dbgfs.dbg_lvl = DEFAULT_DBG_LVL;
	ctrl->dbgfs.force_xfer_mode = I2C_MSM_XFER_MODE_NONE;
	mutex_init(&ctrl->xfer.mtx);
	ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "error: null device-tree node\n");
		return -EBADE;
	}

	ret = i2c_msm_rsrcs_process_dt(ctrl, pdev);
	if (ret) {
		dev_err(ctrl->dev, "error processing device tree node\n");
		return ret;
	}

	ret = i2c_msm_rsrcs_mem_init(pdev, ctrl);
	if (ret)
		goto mem_err;

	ret = i2c_msm_rsrcs_clk_init(ctrl);
	if (ret)
		goto clk_err;

	/* vote for clock to enable reading the version number off the HW */
	i2c_msm_clk_path_vote(ctrl);

	ret = i2c_msm_pm_clk_prepare(ctrl);
	if (ret)
		goto clk_err;

	ret = i2c_msm_pm_clk_enable(ctrl);
	if (ret) {
		i2c_msm_pm_clk_unprepare(ctrl);
		goto clk_err;
	}

	/*
	 * Reset the core before registering for interrupts. This solves an
	 * interrupt storm issue when the bootloader leaves a pending interrupt.
	 */
	ret = i2c_msm_qup_sw_reset(ctrl);
	if (ret)
		dev_err(ctrl->dev, "error on qup software reset\n");

	i2c_msm_pm_clk_disable(ctrl);
	i2c_msm_pm_clk_unprepare(ctrl);
	i2c_msm_clk_path_unvote(ctrl);

	ret = i2c_msm_rsrcs_gpio_pinctrl_init(ctrl);
	if (ret)
		goto err_no_pinctrl;

	i2c_msm_pm_rt_init(ctrl->dev);

	ret = i2c_msm_rsrcs_irq_init(pdev, ctrl);
	if (ret)
		goto irq_err;

	i2c_msm_dbgfs_init(ctrl);

	ret = i2c_msm_frmwrk_reg(pdev, ctrl);
	if (ret)
		goto reg_err;

	i2c_msm_dbg(ctrl, MSM_PROF, "probe() completed with success\n");
	return 0;

reg_err:
	i2c_msm_dbgfs_teardown(ctrl);
	i2c_msm_rsrcs_irq_teardown(ctrl);
irq_err:
	i2c_msm_blk_free_cache(ctrl);
err_no_pinctrl:
	i2c_msm_rsrcs_clk_teardown(ctrl);
clk_err:
	i2c_msm_rsrcs_mem_teardown(ctrl);
mem_err:
	dev_err(ctrl->dev, "error probe() failed with err:%d\n", ret);
	return ret;
}

static int i2c_msm_remove(struct platform_device *pdev)
{
	struct i2c_msm_ctrl *ctrl = platform_get_drvdata(pdev);

	/* grab the mutex to ensure that any ongoing transaction is over */
	mutex_lock(&ctrl->xfer.mtx);
	ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
	pm_runtime_disable(ctrl->dev);
	/* no one can call a xfer after the next line */
	i2c_msm_frmwrk_unreg(ctrl);
	mutex_unlock(&ctrl->xfer.mtx);
	mutex_destroy(&ctrl->xfer.mtx);

	i2c_msm_dma_teardown(ctrl);
	i2c_msm_dbgfs_teardown(ctrl);
	i2c_msm_rsrcs_irq_teardown(ctrl);
	i2c_msm_rsrcs_clk_teardown(ctrl);
	i2c_msm_rsrcs_mem_teardown(ctrl);
	i2c_msm_blk_free_cache(ctrl);
	return 0;
}

static const struct of_device_id i2c_msm_dt_match[] = {
	{
		.compatible = "qcom,i2c-msm-v2",
	},
	{}
};

static struct platform_driver i2c_msm_driver = {
	.probe  = i2c_msm_probe,
	.remove = i2c_msm_remove,
	.driver = {
		.name           = "i2c-msm-v2",
		.pm             = &i2c_msm_pm_ops,
		.of_match_table = i2c_msm_dt_match,
	},
};

static int i2c_msm_init(void)
{
	return platform_driver_register(&i2c_msm_driver);
}
subsys_initcall(i2c_msm_init);

static void i2c_msm_exit(void)
{
	platform_driver_unregister(&i2c_msm_driver);
}
module_exit(i2c_msm_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:i2c-msm-v2");