/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
10#include <linux/errno.h>
11#include <linux/err.h>
12#include <linux/interrupt.h>
Rob Herring8702c672013-10-10 14:38:27 +000013#include <xen/xen.h>
Daniel De Graafe2683952013-07-30 13:29:47 -040014#include <xen/events.h>
15#include <xen/interface/io/tpmif.h>
16#include <xen/grant_table.h>
17#include <xen/xenbus.h>
18#include <xen/page.h>
19#include "tpm.h"
Konrad Rzeszutek Wilk51c71a32013-11-26 15:05:40 -050020#include <xen/platform_pci.h>
Daniel De Graafe2683952013-07-30 13:29:47 -040021
/* Per-device frontend state; also passed to the interrupt handler. */
struct tpm_private {
	struct tpm_chip *chip;		/* chip registered with the tpm core */
	struct xenbus_device *dev;

	/* Page shared with the backend; carries command/response payload. */
	struct vtpm_shared_page *shr;

	unsigned int evtchn;	/* event channel used to signal the backend */
	int ring_ref;		/* grant reference for @shr (0 = not granted) */
	domid_t backend_id;	/* backend domain id (not used in this chunk) */
};
32
/*
 * Status bits synthesized by vtpm_status() from the shared-page state;
 * the tpm core polls on these via req_complete_mask/req_complete_val.
 */
enum status_bits {
	VTPM_STATUS_RUNNING = 0x1,
	VTPM_STATUS_IDLE = 0x2,
	VTPM_STATUS_RESULT = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};
39
40static u8 vtpm_status(struct tpm_chip *chip)
41{
42 struct tpm_private *priv = TPM_VPRIV(chip);
43 switch (priv->shr->state) {
44 case VTPM_STATE_IDLE:
45 return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
46 case VTPM_STATE_FINISH:
47 return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
48 case VTPM_STATE_SUBMIT:
49 case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
50 return VTPM_STATUS_RUNNING;
51 default:
52 return 0;
53 }
54}
55
56static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
57{
58 return status & VTPM_STATUS_CANCELED;
59}
60
61static void vtpm_cancel(struct tpm_chip *chip)
62{
63 struct tpm_private *priv = TPM_VPRIV(chip);
64 priv->shr->state = VTPM_STATE_CANCEL;
65 wmb();
66 notify_remote_via_evtchn(priv->evtchn);
67}
68
69static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
70{
71 return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
72}
73
74static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
75{
76 struct tpm_private *priv = TPM_VPRIV(chip);
77 struct vtpm_shared_page *shr = priv->shr;
78 unsigned int offset = shr_data_offset(shr);
79
80 u32 ordinal;
81 unsigned long duration;
82
83 if (offset > PAGE_SIZE)
84 return -EINVAL;
85
86 if (offset + count > PAGE_SIZE)
87 return -EINVAL;
88
89 /* Wait for completion of any existing command or cancellation */
90 if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
91 &chip->vendor.read_queue, true) < 0) {
92 vtpm_cancel(chip);
93 return -ETIME;
94 }
95
96 memcpy(offset + (u8 *)shr, buf, count);
97 shr->length = count;
98 barrier();
99 shr->state = VTPM_STATE_SUBMIT;
100 wmb();
101 notify_remote_via_evtchn(priv->evtchn);
102
103 ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal);
104 duration = tpm_calc_ordinal_duration(chip, ordinal);
105
106 if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
107 &chip->vendor.read_queue, true) < 0) {
108 /* got a signal or timeout, try to cancel */
109 vtpm_cancel(chip);
110 return -ETIME;
111 }
112
113 return count;
114}
115
116static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
117{
118 struct tpm_private *priv = TPM_VPRIV(chip);
119 struct vtpm_shared_page *shr = priv->shr;
120 unsigned int offset = shr_data_offset(shr);
121 size_t length = shr->length;
122
123 if (shr->state == VTPM_STATE_IDLE)
124 return -ECANCELED;
125
126 /* In theory the wait at the end of _send makes this one unnecessary */
127 if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
128 &chip->vendor.read_queue, true) < 0) {
129 vtpm_cancel(chip);
130 return -ETIME;
131 }
132
133 if (offset > PAGE_SIZE)
134 return -EIO;
135
136 if (offset + length > PAGE_SIZE)
137 length = PAGE_SIZE - offset;
138
139 if (length > count)
140 length = count;
141
142 memcpy(buf, offset + (u8 *)shr, length);
143
144 return length;
145}
146
/* Character-device operations; all work is delegated to the tpm core. */
static const struct file_operations vtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
155
/* Standard TPM sysfs attributes, all backed by the common tpm helpers. */
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
Daniel De Graafe2683952013-07-30 13:29:47 -0400167
/* Attribute table exposed to sysfs via tpm_vtpm.attr_group. */
static struct attribute *vtpm_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr,
	NULL,		/* sysfs table terminator */
};

static struct attribute_group vtpm_attr_grp = {
	.attrs = vtpm_attrs,
};
185
/*
 * Vendor-specific callbacks wiring the tpm core to the Xen ring
 * implementation above.
 */
static const struct tpm_vendor_specific tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	/* A command is complete once the ring is idle with a result. */
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
	.attr_group = &vtpm_attr_grp,
	.miscdev = {
		.fops = &vtpm_ops,
	},
};
199
200static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
201{
202 struct tpm_private *priv = dev_id;
203
204 switch (priv->shr->state) {
205 case VTPM_STATE_IDLE:
206 case VTPM_STATE_FINISH:
207 wake_up_interruptible(&priv->chip->vendor.read_queue);
208 break;
209 case VTPM_STATE_SUBMIT:
210 case VTPM_STATE_CANCEL:
211 default:
212 break;
213 }
214 return IRQ_HANDLED;
215}
216
217static int setup_chip(struct device *dev, struct tpm_private *priv)
218{
219 struct tpm_chip *chip;
220
221 chip = tpm_register_hardware(dev, &tpm_vtpm);
222 if (!chip)
223 return -ENODEV;
224
225 init_waitqueue_head(&chip->vendor.read_queue);
226
227 priv->chip = chip;
228 TPM_VPRIV(chip) = priv;
229
230 return 0;
231}
232
233/* caller must clean up in case of errors */
234static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
235{
236 struct xenbus_transaction xbt;
237 const char *message = NULL;
238 int rv;
239
240 priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
241 if (!priv->shr) {
242 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
243 return -ENOMEM;
244 }
245
246 rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
247 if (rv < 0)
248 return rv;
249
250 priv->ring_ref = rv;
251
252 rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
253 if (rv)
254 return rv;
255
256 rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
257 "tpmif", priv);
258 if (rv <= 0) {
259 xenbus_dev_fatal(dev, rv, "allocating TPM irq");
260 return rv;
261 }
262 priv->chip->vendor.irq = rv;
263
264 again:
265 rv = xenbus_transaction_start(&xbt);
266 if (rv) {
267 xenbus_dev_fatal(dev, rv, "starting transaction");
268 return rv;
269 }
270
271 rv = xenbus_printf(xbt, dev->nodename,
272 "ring-ref", "%u", priv->ring_ref);
273 if (rv) {
274 message = "writing ring-ref";
275 goto abort_transaction;
276 }
277
278 rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
279 priv->evtchn);
280 if (rv) {
281 message = "writing event-channel";
282 goto abort_transaction;
283 }
284
285 rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
286 if (rv) {
287 message = "writing feature-protocol-v2";
288 goto abort_transaction;
289 }
290
291 rv = xenbus_transaction_end(xbt, 0);
292 if (rv == -EAGAIN)
293 goto again;
294 if (rv) {
295 xenbus_dev_fatal(dev, rv, "completing transaction");
296 return rv;
297 }
298
299 xenbus_switch_state(dev, XenbusStateInitialised);
300
301 return 0;
302
303 abort_transaction:
304 xenbus_transaction_end(xbt, 1);
305 if (message)
306 xenbus_dev_error(dev, rv, "%s", message);
307
308 return rv;
309}
310
311static void ring_free(struct tpm_private *priv)
312{
313 if (!priv)
314 return;
315
316 if (priv->ring_ref)
317 gnttab_end_foreign_access(priv->ring_ref, 0,
318 (unsigned long)priv->shr);
319 else
320 free_page((unsigned long)priv->shr);
321
322 if (priv->chip && priv->chip->vendor.irq)
323 unbind_from_irqhandler(priv->chip->vendor.irq, priv);
324
325 kfree(priv);
326}
327
328static int tpmfront_probe(struct xenbus_device *dev,
329 const struct xenbus_device_id *id)
330{
331 struct tpm_private *priv;
332 int rv;
333
334 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
335 if (!priv) {
336 xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
337 return -ENOMEM;
338 }
339
340 rv = setup_chip(&dev->dev, priv);
341 if (rv) {
342 kfree(priv);
343 return rv;
344 }
345
346 rv = setup_ring(dev, priv);
347 if (rv) {
348 tpm_remove_hardware(&dev->dev);
349 ring_free(priv);
350 return rv;
351 }
352
353 tpm_get_timeouts(priv->chip);
354
Daniel De Graafe2683952013-07-30 13:29:47 -0400355 return rv;
356}
357
358static int tpmfront_remove(struct xenbus_device *dev)
359{
360 struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
361 struct tpm_private *priv = TPM_VPRIV(chip);
362 tpm_remove_hardware(&dev->dev);
363 ring_free(priv);
364 TPM_VPRIV(chip) = NULL;
365 return 0;
366}
367
368static int tpmfront_resume(struct xenbus_device *dev)
369{
370 /* A suspend/resume/migrate will interrupt a vTPM anyway */
371 tpmfront_remove(dev);
372 return tpmfront_probe(dev, NULL);
373}
374
375static void backend_changed(struct xenbus_device *dev,
376 enum xenbus_state backend_state)
377{
378 int val;
379
380 switch (backend_state) {
381 case XenbusStateInitialised:
382 case XenbusStateConnected:
383 if (dev->state == XenbusStateConnected)
384 break;
385
386 if (xenbus_scanf(XBT_NIL, dev->otherend,
387 "feature-protocol-v2", "%d", &val) < 0)
388 val = 0;
389 if (!val) {
390 xenbus_dev_fatal(dev, -EINVAL,
391 "vTPM protocol 2 required");
392 return;
393 }
394 xenbus_switch_state(dev, XenbusStateConnected);
395 break;
396
397 case XenbusStateClosing:
398 case XenbusStateClosed:
399 device_unregister(&dev->dev);
400 xenbus_frontend_closed(dev);
401 break;
402 default:
403 break;
404 }
405}
406
/* Xenbus device types handled by this frontend. */
static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }		/* table terminator */
};
MODULE_ALIAS("xen:vtpm");
412
/* Xenbus frontend driver glue; matches tpmfront_ids above. */
static DEFINE_XENBUS_DRIVER(tpmfront, ,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
	);
419
420static int __init xen_tpmfront_init(void)
421{
422 if (!xen_domain())
423 return -ENODEV;
424
Konrad Rzeszutek Wilk51c71a32013-11-26 15:05:40 -0500425 if (!xen_has_pv_devices())
426 return -ENODEV;
427
Daniel De Graafe2683952013-07-30 13:29:47 -0400428 return xenbus_register_frontend(&tpmfront_driver);
429}
430module_init(xen_tpmfront_init);
431
432static void __exit xen_tpmfront_exit(void)
433{
434 xenbus_unregister_driver(&tpmfront_driver);
435}
436module_exit(xen_tpmfront_exit);
437
/* Module metadata. */
MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");