/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

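/*
 * Allocate a free slot index (tag) from the slot_index_tags bitmap; called
 * with hisi_hba->lock held from the task prep path. Returns -SAS_QUEUE_FULL
 * when every command slot is in use.
 */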
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

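/*
 * Release everything attached to a completed or aborted slot: unmap the
 * scatterlist (non-ATA only), return the command table, status buffer and
 * SGE page to their DMA pools, unlink the slot from its port list and hand
 * the slot index back to the tag bitmap.
 */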
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct device *dev = &hisi_hba->pdev->dev;

	if (!slot->task)
		return;

	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(dev, task->scatter, slot->n_elem,
				     task->data_dir);

	if (slot->command_table)
		dma_pool_free(hisi_hba->command_table_pool,
			      slot->command_table, slot->command_table_dma);

	if (slot->status_buffer)
		dma_pool_free(hisi_hba->status_buffer_pool,
			      slot->status_buffer, slot->status_buffer_dma);

	if (slot->sge_page)
		dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
			      slot->sge_page_dma);

	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	memset(slot, 0, sizeof(*slot));
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_stp(hisi_hba, slot);
}

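/*
 * Build a delivery queue entry for a sas_task: map the scatterlist, allocate
 * a slot index and a free delivery queue position, carve the status buffer
 * and command table out of the DMA pools, then hand the slot to the
 * protocol-specific (SMP/SSP/STP) prep hook of the hw layer. Called with
 * hisi_hba->lock held; *pass counts slots made ready for delivery.
 */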
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct device *dev = &hisi_hba->pdev->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;

	if (!device->port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will still use dev->port, so do not
		 * call task_done for SATA devices
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %llu not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	port = device->port->lldd_port;
	if (port && !port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;

			dev_info(dev,
				 "task prep: SATA/STP port%d not attached to device\n",
				 device->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		} else {
			struct task_status_struct *ts = &task->task_status;

			dev_info(dev,
				 "task prep: SAS port%d not attached to device\n",
				 device->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return 0;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc)
		goto err_out;
	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
					 &dlvry_queue_slot);
	if (rc)
		goto err_out_tag;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
					     GFP_ATOMIC,
					     &slot->status_buffer_dma);
	if (!slot->status_buffer) {
		rc = -ENOMEM;
		goto err_out_slot_buf;
	}
	memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);

	slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
					     GFP_ATOMIC,
					     &slot->command_table_dma);
	if (!slot->command_table) {
		rc = -ENOMEM;
		goto err_out_status_buf;
	}
	memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		if (slot->sge_page)
			goto err_out_sge;
		goto err_out_command_table;
	}

	list_add_tail(&slot->entry, &port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	hisi_hba->slot_prep = slot;

	sas_dev->running_req++;
	++(*pass);

	return 0;

err_out_sge:
	dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
		      slot->sge_page_dma);
err_out_command_table:
	dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
		      slot->command_table_dma);
err_out_status_buf:
	dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
		      slot->status_buffer_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

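/*
 * Common entry point for normal and TMF task execution: prepare the task
 * under hisi_hba->lock and, if at least one slot was queued, kick the
 * hardware via start_delivery().
 */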
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = &hisi_hba->pdev->dev;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return rc;
}

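/*
 * Report an OOB-completed phy to libsas: publish the negotiated link rates,
 * fill in the identify frame for SAS phys and notify PORTE_BYTES_DMAED so
 * that libsas forms the port and discovers the attached device.
 */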
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			break;
		}
	}
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

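/*
 * libsas ->lldd_dev_found hook: grab a free hisi_sas_device entry, program
 * its ITCT in hardware and, for devices behind an expander, record which
 * expander phy the device is attached to.
 */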
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = &hisi_hba->pdev->dev;

	sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	int i;

	for (i = 0; i < hisi_hba->n_phy; ++i)
		hisi_sas_bytes_dmaed(hisi_hba, i);

	hisi_hba->scan_finished = 1;
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	if (hisi_hba->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

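/*
 * Complete (with the abort flag set) every slot still queued on the port
 * behind the given phy, optionally restricted to one domain device. Used
 * when a port is deformed or a device is being reset.
 */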
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no,
				     struct domain_device *device)
{
	struct hisi_sas_phy *phy;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot, *slot2;
	struct device *dev = &hisi_hba->pdev->dev;

	phy = &hisi_hba->phy[phy_no];
	port = phy->port;
	if (!port)
		return;

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;

		task = slot->task;
		if (device && task->dev != device)
			continue;

		dev_info(dev, "Release slot [%d:%d], task [%p]:\n",
			 slot->dlvry_queue, slot->dlvry_queue_slot, task);
		hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
	}
}

static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy)
{
	struct domain_device *device;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;

	list_for_each_entry(device, &sas_port->dev_list, dev_list_node)
		hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct asd_sas_port *port = device->port;
	struct asd_sas_phy *sas_phy;

	list_for_each_entry(sas_phy, &port->phy_list, port_phy_el)
		hisi_sas_do_release_task(hisi_hba, sas_phy->id, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	u64 dev_id = sas_dev->device_id;

	dev_info(dev, "found dev[%lld:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->device_id = dev_id;
	sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

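/*
 * libsas ->lldd_control_phy hook: map the generic phy control functions onto
 * the hw layer's reset/enable/disable callbacks. Link rate setting and
 * spinup-hold release are not supported here.
 */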
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
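/*
 * Issue a task management function as an internal slow task: retry up to
 * TASK_RETRY times, arm a TASK_TIMEOUT-second timer per attempt and decode
 * the TMF response (complete, underrun, overrun or failure).
 */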
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = &hisi_hba->pdev->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.data = (unsigned long) task;
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
					tmf->tag_of_task_to_be_managed);
				if (task->lldd_task) {
					struct hisi_sas_slot *slot =
						task->lldd_task;

					hisi_sas_slot_task_free(hisi_hba,
								task, slot);
				}

				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	WARN_ON(retry == TASK_RETRY);
	sas_free_task(task);
	return res;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

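/*
 * libsas ->lldd_abort_task hook: for SSP commands send an ABORT TASK TMF and
 * complete the aborted slot on success; for SATA/STP devices the task is
 * simply marked aborted and reported complete.
 */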
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = &hisi_hba->pdev->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	spin_unlock_irqrestore(&task->task_state_lock, flags);
	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		/* if successful, clear the task slot and run the completion path */
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			if (task->lldd_task) {
				struct hisi_sas_slot *slot;

				slot = &hisi_hba->slot_info
					[tmf_task.tag_of_task_to_be_managed];
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			struct hisi_slot_info *slot = task->lldd_task;

			dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
				   hisi_hba, task, slot);
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}

	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_task(hisi_hba, device);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return 0;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_LU_RESET;
	sas_dev->dev_status = HISI_SAS_DEV_EH;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	/* If failed, fall through to I_T nexus reset */
	dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
		sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	return rc;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_deformed(sas_phy);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}

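/*
 * Called by the hw layer when a phy goes down. If rdy is set the phy is
 * reported up again (bytes DMAed and port formed); otherwise libsas is
 * notified of the loss of signal and the local port/phy state is cleared.
 */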
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

static struct scsi_transport_template *hisi_sas_stt;

static struct scsi_host_template hisi_sas_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = sas_queuecommand,
	.target_alloc = sas_target_alloc,
	.slave_configure = sas_slave_configure,
	.scan_finished = hisi_sas_scan_finished,
	.scan_start = hisi_sas_scan_start,
	.change_queue_depth = sas_change_queue_depth,
	.bios_param = sas_bios_param,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler = sas_eh_bus_reset_handler,
	.target_destroy = sas_target_destroy,
	.ioctl = sas_ioctl,
};

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found = hisi_sas_dev_found,
	.lldd_dev_gone = hisi_sas_dev_gone,
	.lldd_execute_task = hisi_sas_queue_command,
	.lldd_control_phy = hisi_sas_control_phy,
	.lldd_abort_task = hisi_sas_abort_task,
	.lldd_abort_task_set = hisi_sas_abort_task_set,
	.lldd_clear_aca = hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset = hisi_sas_lu_reset,
	.lldd_query_task = hisi_sas_query_task,
	.lldd_port_formed = hisi_sas_port_formed,
	.lldd_port_deformed = hisi_sas_port_deformed,
};

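/*
 * Allocate the per-HBA DMA resources: delivery/completion queues, ITCT,
 * IOST, breakpoint and initial FIS memory, the DMA pools for command
 * tables, status buffers and SGE pages, the slot/tag bookkeeping and the
 * phy-up workqueue.
 */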
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct platform_device *pdev = hisi_hba->pdev;
	struct device *dev = &pdev->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
		INIT_LIST_HEAD(&hisi_hba->port[i].list);
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;
		memset(hisi_hba->cmd_hdr[i], 0, s);

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
		memset(hisi_hba->complete_hdr[i], 0, s);
	}

	s = HISI_SAS_STATUS_BUF_SZ;
	hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
						       dev, s, 16, 0);
	if (!hisi_hba->status_buffer_pool)
		goto err_out;

	s = HISI_SAS_COMMAND_TABLE_SZ;
	hisi_hba->command_table_pool = dma_pool_create("command_table",
						       dev, s, 16, 0);
	if (!hisi_hba->command_table_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	memset(hisi_hba->breakpoint, 0, s);

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / sizeof(unsigned long);
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
				sizeof(struct hisi_sas_sge_page), 16, 0);
	if (!hisi_hba->sge_page_pool)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	memset(hisi_hba->sata_breakpoint, 0, s);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}

static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->status_buffer_pool);
	dma_pool_destroy(hisi_hba->command_table_pool);
	dma_pool_destroy(hisi_hba->sge_page_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;

	shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost)
		goto err_out;
	hisi_hba = shost_priv(shost);

	hisi_hba->hw = hw;
	hisi_hba->pdev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE))
		goto err_out;

	if (np) {
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl))
			goto err_out;

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg))
			goto err_out;

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg))
			goto err_out;

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg))
			goto err_out;
	}

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
		goto err_out;

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}

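/*
 * Common probe routine shared by the hw-specific platform drivers: allocate
 * the Scsi_Host and HBA state, set up DMA masks and the libsas phy/port
 * arrays, initialise the hardware and register the host with the SCSI
 * midlayer and libsas.
 */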
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		return -ENOMEM;

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = &hisi_hba->pdev->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	kfree(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	scsi_remove_host(sha->core.shost);
	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
	pr_info("hisi_sas: driver version %s\n", DRV_VERSION);

	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);