/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_EXPANDER(type) \
	((type == SAS_EDGE_EXPANDER_DEVICE) || \
	 (type == SAS_FANOUT_EXPANDER_DEVICE))

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

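/*
 * Allocate a free command tag by scanning the slot bitmap. Returns
 * -SAS_QUEUE_FULL when every slot is in use. Callers serialise on
 * hisi_hba->lock, so no extra locking is done here.
 */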
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
	if (index >= hisi_hba->slot_index_count)
		return -SAS_QUEUE_FULL;
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	return 0;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

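/*
 * Release all per-slot resources after a command has completed or been
 * aborted: unmap the scatterlist (non-ATA only), return the command
 * table, status buffer and SGE page to their DMA pools, unlink the slot
 * from its port list and free the tag.
 */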
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct device *dev = &hisi_hba->pdev->dev;

	if (!slot->task)
		return;

	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(dev, task->scatter, slot->n_elem,
				     task->data_dir);

	if (slot->command_table)
		dma_pool_free(hisi_hba->command_table_pool,
			      slot->command_table, slot->command_table_dma);

	if (slot->status_buffer)
		dma_pool_free(hisi_hba->status_buffer_pool,
			      slot->status_buffer, slot->status_buffer_dma);

	if (slot->sge_page)
		dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
			      slot->sge_page_dma);

	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	memset(slot, 0, sizeof(*slot));
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	return hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot, int is_tmf,
				  struct hisi_sas_tmf_task *tmf)
{
	return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}

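/*
 * Build a delivery-queue slot for a libsas task: map the scatterlist,
 * allocate a tag and a free delivery-queue entry, fill in the command
 * header, status buffer and command table, then hand the slot to the
 * protocol-specific hw->prep_* hook. On success *pass is incremented so
 * the caller knows a slot is ready to be delivered. Called with
 * hisi_hba->lock held.
 */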
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
			      int is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct device *dev = &hisi_hba->pdev->dev;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;

	if (!device->port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; do not call
		 * task_done() for SATA devices
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %llu not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	port = device->port->lldd_port;
	if (port && !port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;

			dev_info(dev,
				 "task prep: SATA/STP port%d not attach device\n",
				 device->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		} else {
			struct task_status_struct *ts = &task->task_status;

			dev_info(dev,
				 "task prep: SAS port%d does not attach device\n",
				 device->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return 0;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else
		n_elem = task->num_scatter;

	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc)
		goto err_out;
	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
					 &dlvry_queue_slot);
	if (rc)
		goto err_out_tag;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	task->lldd_task = slot;

	slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
					     GFP_ATOMIC,
					     &slot->status_buffer_dma);
	if (!slot->status_buffer)
		goto err_out_slot_buf;
	memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);

	slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
					     GFP_ATOMIC,
					     &slot->command_table_dma);
	if (!slot->command_table)
		goto err_out_status_buf;
	memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		dev_err(dev, "task prep: rc = 0x%x\n", rc);
		if (slot->sge_page)
			goto err_out_sge;
		goto err_out_command_table;
	}

	list_add_tail(&slot->entry, &port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	hisi_hba->slot_prep = slot;

	sas_dev->running_req++;
	++(*pass);

	return rc;

err_out_sge:
	dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
		      slot->sge_page_dma);
err_out_command_table:
	dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
		      slot->command_table_dma);
err_out_status_buf:
	dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
		      slot->status_buffer_dma);
err_out_slot_buf:
	/* Nothing to be done */
err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

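/*
 * Queue a single task: take the HBA lock, prepare a slot and, if one was
 * prepared, start delivery via the hardware layer's hw->start_delivery().
 */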
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      int is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = &hisi_hba->pdev->dev;

	/* protect task_prep and start_delivery sequence */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass))
		hisi_hba->hw->start_delivery(hisi_hba);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return rc;
}

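/*
 * Report a phy-up event to libsas: refresh the sas_phy link rates, fill
 * in the identify frame fields for SAS phys, then raise PHYE_OOB_DONE
 * and PORTE_BYTES_DMAED so that discovery can proceed.
 */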
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			break;
		}
	}
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = &hisi_hba->pdev->dev;

	sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr)) {
				sas_dev->attached_phy = phy_no;
				break;
			}
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			return -EINVAL;
		}
	}

	return 0;
}

static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	int i;

	for (i = 0; i < hisi_hba->n_phy; ++i)
		hisi_sas_bytes_dmaed(hisi_hba, i);

	hisi_hba->scan_finished = 1;
}

static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	if (hisi_hba->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, struct hisi_sas_phy, phyup_ws);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no,
				     struct domain_device *device)
{
	struct hisi_sas_phy *phy;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot, *slot2;
	struct device *dev = &hisi_hba->pdev->dev;

	phy = &hisi_hba->phy[phy_no];
	port = phy->port;
	if (!port)
		return;

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;

		task = slot->task;
		if (device && task->dev != device)
			continue;

		dev_info(dev, "Release slot [%d:%d], task [%p]:\n",
			 slot->dlvry_queue, slot->dlvry_queue_slot, task);
		hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
	}
}

static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy)
{
	struct domain_device *device;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;

	list_for_each_entry(device, &sas_port->dev_list, dev_list_node)
		hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct asd_sas_port *port = device->port;
	struct asd_sas_phy *sas_phy;

	list_for_each_entry(sas_phy, &port->phy_list, port_phy_el)
		hisi_sas_do_release_task(hisi_hba, sas_phy->id, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	u64 dev_id = sas_dev->device_id;

	dev_info(dev, "found dev[%lld:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	hisi_hba->hw->free_device(hisi_hba, sas_dev);
	device->lldd_dev = NULL;
	memset(sas_dev, 0, sizeof(*sas_dev));
	sas_dev->device_id = dev_id;
	sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

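/*
 * libsas lldd_control_phy hook: dispatch hard reset, link reset and
 * disable requests to the hardware layer; link-rate setting and spin-up
 * hold release are not supported.
 */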
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_enable(hisi_hba, phy_no);
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
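/*
 * Issue a task management function as an internal slow task and wait for
 * it to complete, retrying up to TASK_RETRY times with a TASK_TIMEOUT
 * second timer per attempt. Returns TMF_RESP_FUNC_COMPLETE on success,
 * the residual byte count on data underrun, or an error/TMF failure code.
 */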
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = &hisi_hba->pdev->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.data = (unsigned long) task;
		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
					tmf->tag_of_task_to_be_managed);
				if (task->lldd_task) {
					struct hisi_sas_slot *slot =
						task->lldd_task;

					hisi_sas_slot_task_free(hisi_hba,
								task, slot);
				}

				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	WARN_ON(retry == TASK_RETRY);
	sas_free_task(task);
	return res;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

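/*
 * libsas lldd_abort_task hook. For SSP tasks an ABORT TASK TMF is issued
 * to the target and, on success, the corresponding slot is completed and
 * freed. For SATA/STP tasks the command is simply marked as aborted.
 */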
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = &hisi_hba->pdev->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev) {
		dev_warn(dev, "Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}

	spin_unlock_irqrestore(&task->task_state_lock, flags);
	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		/* if successful, clear the task and complete its slot */
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			if (task->lldd_task) {
				struct hisi_sas_slot *slot;

				slot = &hisi_hba->slot_info
					[tmf_task.tag_of_task_to_be_managed];
				spin_lock_irqsave(&hisi_hba->lock, flags);
				hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
				spin_unlock_irqrestore(&hisi_hba->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			struct hisi_slot_info *slot = task->lldd_task;

			dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
				   hisi_hba, task, slot);
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}

	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_release_task(hisi_hba, device);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return 0;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = &hisi_hba->pdev->dev;
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;

	tmf_task.tmf = TMF_LU_RESET;
	sas_dev->dev_status = HISI_SAS_DEV_EH;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_release_task(hisi_hba, device);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}

	/* If failed, fall back to I_T nexus reset */
	dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
		sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	return rc;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_deformed(sas_phy);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;
}
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

static struct scsi_transport_template *hisi_sas_stt;

static struct scsi_host_template hisi_sas_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.scan_finished		= hisi_sas_scan_finished,
	.scan_start		= hisi_sas_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_port_deformed	= hisi_sas_port_deformed,
};

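/*
 * Allocate the per-HBA memory: delivery and completion queues, DMA pools
 * for command tables, status buffers and SGE pages, the ITCT, IOST and
 * breakpoint tables, the slot/tag bookkeeping and the phy-up workqueue.
 */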
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	int i, s;
	struct platform_device *pdev = hisi_hba->pdev;
	struct device *dev = &pdev->dev;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
		INIT_LIST_HEAD(&hisi_hba->port[i].list);
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;
		memset(hisi_hba->cmd_hdr[i], 0, s);

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
		memset(hisi_hba->complete_hdr[i], 0, s);
	}

	s = HISI_SAS_STATUS_BUF_SZ;
	hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
						       dev, s, 16, 0);
	if (!hisi_hba->status_buffer_pool)
		goto err_out;

	s = HISI_SAS_COMMAND_TABLE_SZ;
	hisi_hba->command_table_pool = dma_pool_create("command_table",
						       dev, s, 16, 0);
	if (!hisi_hba->command_table_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					    GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, HISI_SAS_COMMAND_ENTRIES,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	memset(hisi_hba->iost, 0, s);

	s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	memset(hisi_hba->breakpoint, 0, s);

	hisi_hba->slot_index_count = HISI_SAS_COMMAND_ENTRIES;
	s = hisi_hba->slot_index_count / sizeof(unsigned long);
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
				sizeof(struct hisi_sas_sge_page), 16, 0);
	if (!hisi_hba->sge_page_pool)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;
	memset(hisi_hba->initial_fis, 0, s);

	s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	memset(hisi_hba->sata_breakpoint, 0, s);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}

static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = &hisi_hba->pdev->dev;
	int i, s;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->status_buffer_pool);
	dma_pool_destroy(hisi_hba->command_table_pool);
	dma_pool_destroy(hisi_hba->sge_page_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}

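/*
 * Allocate the Scsi_Host, parse the devicetree properties (SAS address,
 * controller reset/clock registers, phy and queue counts), map the
 * register space and set up the per-HBA memory via hisi_sas_alloc().
 */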
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	struct property *sas_addr_prop;
	int num;

	shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
	if (!shost)
		goto err_out;
	hisi_hba = shost_priv(shost);

	hisi_hba->hw = hw;
	hisi_hba->pdev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	init_timer(&hisi_hba->timer);

	sas_addr_prop = of_find_property(np, "sas-addr", NULL);
	if (!sas_addr_prop || (sas_addr_prop->length != SAS_ADDR_SIZE))
		goto err_out;
	memcpy(hisi_hba->sas_addr, sas_addr_prop->value, SAS_ADDR_SIZE);

	if (of_property_read_u32(np, "ctrl-reset-reg",
				 &hisi_hba->ctrl_reset_reg))
		goto err_out;

	if (of_property_read_u32(np, "ctrl-reset-sts-reg",
				 &hisi_hba->ctrl_reset_sts_reg))
		goto err_out;

	if (of_property_read_u32(np, "ctrl-clock-ena-reg",
				 &hisi_hba->ctrl_clock_ena_reg))
		goto err_out;

	if (of_property_read_u32(np, "phy-count", &hisi_hba->n_phy))
		goto err_out;

	if (of_property_read_u32(np, "queue-count", &hisi_hba->queue_count))
		goto err_out;

	num = of_irq_count(np);
	hisi_hba->int_names = devm_kcalloc(dev, num,
					   HISI_SAS_NAME_LEN,
					   GFP_KERNEL);
	if (!hisi_hba->int_names)
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(
				np, "hisilicon,sas-syscon");
	if (IS_ERR(hisi_hba->ctrl))
		goto err_out;

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++)
		memcpy(&hisi_hba->phy[i].dev_sas_addr,
		       hisi_hba->sas_addr,
		       SAS_ADDR_SIZE);
}

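/*
 * Common probe path shared by the hardware-version-specific drivers:
 * allocate the host, set the DMA masks, wire up the libsas host
 * structures, run the hardware init hook and register with SCSI/libsas.
 */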
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_out_ha;
	}

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		return -ENOMEM;

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = HISI_SAS_COMMAND_ENTRIES;
	shost->cmd_per_lun = HISI_SAS_COMMAND_ENTRIES;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = &hisi_hba->pdev->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	hisi_sas_init_add(hisi_hba);

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	kfree(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	scsi_remove_host(sha->core.shost);
	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

static __init int hisi_sas_init(void)
{
	pr_info("hisi_sas: driver version %s\n", DRV_VERSION);

	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);