/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include <linux/nospec.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS 10

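/*
 * Copy a single GRU handle to the user buffer and advance the destination
 * pointer.  Returns 0 on success, non-zero if the copy faults.
 */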
static int gru_user_copy_handle(void __user **dp, void *s)
{
        if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
                return -1;
        *dp += GRU_HANDLE_BYTES;
        return 0;
}

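/*
 * Dump the data for one context: for each allocated CBR, copy its CB, TFH
 * and CBE handles to the user buffer, then copy the data segment (DSR)
 * cache lines if requested.
 */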
static int gru_dump_context_data(void *grubase,
                        struct gru_context_configuration_handle *cch,
                        void __user *ubuf, int ctxnum, int dsrcnt,
                        int flush_cbrs)
{
        void *cb, *cbe, *tfh, *gseg;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        tfh = grubase + GRU_TFH_BASE;

        for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
                if (flush_cbrs)
                        gru_flush_cache(cb);
                if (gru_user_copy_handle(&ubuf, cb))
                        goto fail;
                if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
                        goto fail;
                if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
                        goto fail;
                cb += GRU_HANDLE_STRIDE;
        }
        if (dsrcnt && copy_to_user(ubuf, gseg + GRU_DS_BASE,
                                   dsrcnt * GRU_HANDLE_STRIDE))
                goto fail;
        return 0;

fail:
        return -EFAULT;
}

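/*
 * Dump the chiplet's TLB fault map (TFM) handles to the user buffer.
 * Returns the number of bytes copied, or a negative errno on failure.
 */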
static int gru_dump_tfm(struct gru_state *gru,
                void __user *ubuf, void __user *ubufend)
{
        struct gru_tlb_fault_map *tfm;
        int i;

        if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
                return -EFBIG;

        for (i = 0; i < GRU_NUM_TFM; i++) {
                tfm = get_tfm(gru->gs_gru_base_vaddr, i);
                if (gru_user_copy_handle(&ubuf, tfm))
                        goto fail;
        }
        return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
        return -EFAULT;
}

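/*
 * Dump the chiplet's TLB global handles (TGH) to the user buffer.
 * Returns the number of bytes copied, or a negative errno on failure.
 */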
static int gru_dump_tgh(struct gru_state *gru,
                void __user *ubuf, void __user *ubufend)
{
        struct gru_tlb_global_handle *tgh;
        int i;

        if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
                return -EFBIG;

        for (i = 0; i < GRU_NUM_TGH; i++) {
                tgh = get_tgh(gru->gs_gru_base_vaddr, i);
                if (gru_user_copy_handle(&ubuf, tgh))
                        goto fail;
        }
        return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
        return -EFAULT;
}

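/*
 * Dump the state of one GRU context: a dump header, the CCH, and optionally
 * the context data (CBR/DSR handles).  Returns the number of bytes written
 * to the user buffer, or a negative errno on failure.
 */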
static int gru_dump_context(struct gru_state *gru, int ctxnum,
                void __user *ubuf, void __user *ubufend, char data_opt,
                char lock_cch, char flush_cbrs)
{
        struct gru_dump_context_header hdr;
        struct gru_dump_context_header __user *uhdr = ubuf;
        struct gru_context_configuration_handle *cch, *ubufcch;
        struct gru_thread_state *gts;
        int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
        void *grubase;

        memset(&hdr, 0, sizeof(hdr));
        grubase = gru->gs_gru_base_vaddr;
        cch = get_cch(grubase, ctxnum);
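        /*
         * Try to lock the CCH so the context does not change while it is
         * dumped; hdr.cch_locked records whether the lock was obtained.
         */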
        for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
                cch_locked = trylock_cch_handle(cch);
                if (cch_locked)
                        break;
                msleep(1);
        }

        ubuf += sizeof(hdr);
        ubufcch = ubuf;
        if (gru_user_copy_handle(&ubuf, cch)) {
                if (cch_locked)
                        unlock_cch_handle(cch);
                return -EFAULT;
        }
        if (cch_locked)
                ubufcch->delresp = 0;
        bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

        if (cch_locked || !lock_cch) {
                gts = gru->gs_gts[ctxnum];
                if (gts && gts->ts_vma) {
                        hdr.pid = gts->ts_tgid_owner;
                        hdr.vaddr = gts->ts_vma->vm_start;
                }
                if (cch->state != CCHSTATE_INACTIVE) {
                        cbrcnt = hweight64(cch->cbr_allocation_map) *
                                GRU_CBR_AU_SIZE;
                        dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
                                GRU_DSR_AU_CL : 0;
                }
                bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
                if (bytes > ubufend - ubuf)
                        ret = -EFBIG;
                else
                        ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
                                                    dsrcnt, flush_cbrs);
        }
        if (cch_locked)
                unlock_cch_handle(cch);
        if (ret)
                return ret;

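        /* Fill in the header last, once the dumped sizes are known */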
        hdr.magic = GRU_DUMP_MAGIC;
        hdr.gid = gru->gs_gid;
        hdr.ctxnum = ctxnum;
        hdr.cbrcnt = cbrcnt;
        hdr.dsrcnt = dsrcnt;
        hdr.cch_locked = cch_locked;
        if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
                return -EFAULT;

        return bytes;
}

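/*
 * Process a chiplet state dump request.  @arg points to a
 * struct gru_dump_chiplet_state_req in user space.  The chiplet's TFM and
 * TGH handles are dumped first, followed by the selected context(s).
 * Returns the number of contexts dumped, or a negative errno on failure.
 */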
int gru_dump_chiplet_request(unsigned long arg)
{
        struct gru_state *gru;
        struct gru_dump_chiplet_state_req req;
        void __user *ubuf;
        void __user *ubufend;
        int ctxnum, ret, cnt = 0;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        /* Currently, only dump by gid is implemented */
        if (req.gid >= gru_max_gids)
                return -EINVAL;
        req.gid = array_index_nospec(req.gid, gru_max_gids);

        gru = GID_TO_GRU(req.gid);
        ubuf = req.buf;
        ubufend = req.buf + req.buflen;

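        /* Dump chiplet-global state: TLB fault maps, then TLB global handles */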
        ret = gru_dump_tfm(gru, ubuf, ubufend);
        if (ret < 0)
                goto fail;
        ubuf += ret;

        ret = gru_dump_tgh(gru, ubuf, ubufend);
        if (ret < 0)
                goto fail;
        ubuf += ret;

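        /* Dump the requested context, or all contexts if req.ctxnum < 0 */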
        for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                if (req.ctxnum == ctxnum || req.ctxnum < 0) {
                        ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
                                                req.data_opt, req.lock_cch,
                                                req.flush_cbrs);
                        if (ret < 0)
                                goto fail;
                        ubuf += ret;
                        cnt++;
                }
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;
        return cnt;

fail:
        return ret;
}