/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

37static int gru_user_copy_handle(void __user **dp, void *s)
38{
Jack Steiner2b702b22009-06-17 16:28:34 -070039 if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
Jack Steiner9cc9b052009-06-17 16:28:19 -070040 return -1;
41 *dp += GRU_HANDLE_BYTES;
42 return 0;
43}
44
45static int gru_dump_context_data(void *grubase,
46 struct gru_context_configuration_handle *cch,
Jack Steinerb8229be2009-12-15 16:48:09 -080047 void __user *ubuf, int ctxnum, int dsrcnt,
48 int flush_cbrs)
Jack Steiner9cc9b052009-06-17 16:28:19 -070049{
50 void *cb, *cbe, *tfh, *gseg;
51 int i, scr;
52
53 gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
54 cb = gseg + GRU_CB_BASE;
55 cbe = grubase + GRU_CBE_BASE;
56 tfh = grubase + GRU_TFH_BASE;
57
58 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
Jack Steinerb8229be2009-12-15 16:48:09 -080059 if (flush_cbrs)
60 gru_flush_cache(cb);
Jack Steiner9cc9b052009-06-17 16:28:19 -070061 if (gru_user_copy_handle(&ubuf, cb))
62 goto fail;
63 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
64 goto fail;
65 if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
66 goto fail;
67 cb += GRU_HANDLE_STRIDE;
68 }
69 if (dsrcnt)
70 memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
71 return 0;
72
73fail:
74 return -EFAULT;
75}
76
77static int gru_dump_tfm(struct gru_state *gru,
78 void __user *ubuf, void __user *ubufend)
79{
80 struct gru_tlb_fault_map *tfm;
Sudip Mukherjeea010d272015-09-03 20:20:47 +053081 int i;
Jack Steiner9cc9b052009-06-17 16:28:19 -070082
Sudip Mukherjeea010d272015-09-03 20:20:47 +053083 if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
84 return -EFBIG;
Jack Steiner9cc9b052009-06-17 16:28:19 -070085
86 for (i = 0; i < GRU_NUM_TFM; i++) {
87 tfm = get_tfm(gru->gs_gru_base_vaddr, i);
88 if (gru_user_copy_handle(&ubuf, tfm))
89 goto fail;
90 }
91 return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
92
93fail:
94 return -EFAULT;
95}
96
97static int gru_dump_tgh(struct gru_state *gru,
98 void __user *ubuf, void __user *ubufend)
99{
100 struct gru_tlb_global_handle *tgh;
Sudip Mukherjeea010d272015-09-03 20:20:47 +0530101 int i;
Jack Steiner9cc9b052009-06-17 16:28:19 -0700102
Sudip Mukherjeea010d272015-09-03 20:20:47 +0530103 if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
104 return -EFBIG;
Jack Steiner9cc9b052009-06-17 16:28:19 -0700105
106 for (i = 0; i < GRU_NUM_TGH; i++) {
107 tgh = get_tgh(gru->gs_gru_base_vaddr, i);
108 if (gru_user_copy_handle(&ubuf, tgh))
109 goto fail;
110 }
111 return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
112
113fail:
114 return -EFAULT;
115}
116
/*
 * Dump a single GRU context to the user buffer: a header, the context
 * configuration handle (CCH), then (if active) the CBR/TFH/CBE handles
 * and optionally the data segment via gru_dump_context_data().
 *
 * @gru:        chiplet the context lives on
 * @ctxnum:     context number to dump
 * @ubuf:       user destination buffer; header is written at its start
 * @ubufend:    end of the user buffer (for space checking)
 * @data_opt:   nonzero to also dump the data segment (DSRs)
 * @lock_cch:   nonzero means the dump is only valid with the CCH locked
 * @flush_cbrs: nonzero to flush CBs from cache before dumping
 *
 * Returns the number of bytes written on success, -EFBIG if the buffer
 * is too small, -EFAULT on a userspace copy fault.
 */
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	/*
	 * Try to lock the CCH so the context state is stable while we dump
	 * it. Give up after CCH_LOCK_ATTEMPTS tries; a failed lock is not
	 * fatal — the dump proceeds unlocked unless lock_cch demands it.
	 */
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	/* Header is filled in and written last (see below); skip past it. */
	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		/*
		 * NOTE(review): this stores through a __user pointer
		 * directly rather than via put_user/copy_to_user —
		 * presumably tolerated because the copy above just
		 * validated the address; confirm against sparse/uaccess
		 * rules.
		 */
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	/* Dump context payload only if locked, or if caller allows racy reads. */
	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			/* DSRs are dumped only when data_opt requests them. */
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		/* 3 handles (CB, TFH, CBE) per CBR, plus the data segment. */
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	/* Now that counts are known, write the header at the buffer start. */
	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}
184
185int gru_dump_chiplet_request(unsigned long arg)
186{
187 struct gru_state *gru;
188 struct gru_dump_chiplet_state_req req;
189 void __user *ubuf;
190 void __user *ubufend;
191 int ctxnum, ret, cnt = 0;
192
193 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
194 return -EFAULT;
195
196 /* Currently, only dump by gid is implemented */
Sudip Mukherjeec2ed5452015-09-03 20:20:49 +0530197 if (req.gid >= gru_max_gids)
Jack Steiner9cc9b052009-06-17 16:28:19 -0700198 return -EINVAL;
199
200 gru = GID_TO_GRU(req.gid);
201 ubuf = req.buf;
202 ubufend = req.buf + req.buflen;
203
204 ret = gru_dump_tfm(gru, ubuf, ubufend);
205 if (ret < 0)
206 goto fail;
207 ubuf += ret;
208
209 ret = gru_dump_tgh(gru, ubuf, ubufend);
210 if (ret < 0)
211 goto fail;
212 ubuf += ret;
213
214 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
215 if (req.ctxnum == ctxnum || req.ctxnum < 0) {
216 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
Jack Steinerb8229be2009-12-15 16:48:09 -0800217 req.data_opt, req.lock_cch,
218 req.flush_cbrs);
Jack Steiner9cc9b052009-06-17 16:28:19 -0700219 if (ret < 0)
220 goto fail;
221 ubuf += ret;
222 cnt++;
223 }
224 }
225
226 if (copy_to_user((void __user *)arg, &req, sizeof(req)))
227 return -EFAULT;
228 return cnt;
229
230fail:
231 return ret;
232}