/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
10#include <linux/errno.h>
11#include <linux/err.h>
12#include <linux/interrupt.h>
13#include <xen/events.h>
14#include <xen/interface/io/tpmif.h>
15#include <xen/grant_table.h>
16#include <xen/xenbus.h>
17#include <xen/page.h>
18#include "tpm.h"
19
/* Per-device state for one Xen vTPM frontend instance. */
struct tpm_private {
	struct tpm_chip *chip;		/* chip registered with the TPM core */
	struct xenbus_device *dev;	/* our xenbus frontend device */

	struct vtpm_shared_page *shr;	/* page shared with the backend */

	unsigned int evtchn;		/* event channel used to kick the backend */
	int ring_ref;			/* grant reference of the shared page */
	domid_t backend_id;		/* domain id hosting the vTPM backend */
};
30
/*
 * Status bits returned by vtpm_status(), modeled on a TIS-like status
 * register so the common TPM core wait logic can be reused.
 */
enum status_bits {
	VTPM_STATUS_RUNNING = 0x1,	/* a command is being processed */
	VTPM_STATUS_IDLE = 0x2,		/* ready to accept a new command */
	VTPM_STATUS_RESULT = 0x4,	/* a response is available */
	VTPM_STATUS_CANCELED = 0x8,	/* the last command was canceled */
};
37
38static u8 vtpm_status(struct tpm_chip *chip)
39{
40 struct tpm_private *priv = TPM_VPRIV(chip);
41 switch (priv->shr->state) {
42 case VTPM_STATE_IDLE:
43 return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
44 case VTPM_STATE_FINISH:
45 return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
46 case VTPM_STATE_SUBMIT:
47 case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
48 return VTPM_STATUS_RUNNING;
49 default:
50 return 0;
51 }
52}
53
54static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
55{
56 return status & VTPM_STATUS_CANCELED;
57}
58
59static void vtpm_cancel(struct tpm_chip *chip)
60{
61 struct tpm_private *priv = TPM_VPRIV(chip);
62 priv->shr->state = VTPM_STATE_CANCEL;
63 wmb();
64 notify_remote_via_evtchn(priv->evtchn);
65}
66
67static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
68{
69 return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
70}
71
/*
 * Submit a TPM command to the backend through the shared page.
 *
 * Copies @count bytes from @buf into the page's data area, flips the
 * page state to SUBMIT, notifies the backend and waits for completion.
 * Returns @count on success, -EINVAL when the request does not fit in
 * the page, or -ETIME on timeout/signal (after requesting a cancel).
 */
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	/* offset depends on shr->nr_extra_pages, so sanity-check it */
	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();	/* data and length must be in place before state flips */
	shr->state = VTPM_STATE_SUBMIT;
	wmb();		/* order the state write before the event notification */
	notify_remote_via_evtchn(priv->evtchn);

	/* per-ordinal timeout, as some TPM commands legitimately take long */
	ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			&chip->vendor.read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return count;
}
113
/*
 * Copy the response of the last command from the shared page into @buf.
 *
 * Returns the number of bytes copied (clamped to @count and to what
 * fits inside the page), -ECANCELED when the command was canceled,
 * -ETIME when no result arrived in time, or -EIO when the backend
 * advertises a bogus data offset.
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = TPM_VPRIV(chip);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
			&chip->vendor.read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	/* offset and length come from the (untrusted) shared page */
	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}
144
145ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
146 char *buf)
147{
148 struct tpm_chip *chip = dev_get_drvdata(dev);
149 struct tpm_private *priv = TPM_VPRIV(chip);
150 u8 locality = priv->shr->locality;
151
152 return sprintf(buf, "%d\n", locality);
153}
154
155ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
156 const char *buf, size_t len)
157{
158 struct tpm_chip *chip = dev_get_drvdata(dev);
159 struct tpm_private *priv = TPM_VPRIV(chip);
160 u8 val;
161
162 int rv = kstrtou8(buf, 0, &val);
163 if (rv)
164 return rv;
165
166 priv->shr->locality = val;
167
168 return len;
169}
170
/* Character-device ops; all handlers come from the common TPM core. */
static const struct file_operations vtpm_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = tpm_open,
	.read = tpm_read,
	.write = tpm_write,
	.release = tpm_release,
};
179
/*
 * sysfs attributes. All show/store handlers are provided by the common
 * TPM core except "locality", which is implemented in this file.
 */
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
		NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
		tpm_store_locality);
193
/* NULL-terminated list of the attributes above, for the attr group. */
static struct attribute *vtpm_attrs[] = {
	&dev_attr_pubek.attr,
	&dev_attr_pcrs.attr,
	&dev_attr_enabled.attr,
	&dev_attr_active.attr,
	&dev_attr_owned.attr,
	&dev_attr_temp_deactivated.attr,
	&dev_attr_caps.attr,
	&dev_attr_cancel.attr,
	&dev_attr_durations.attr,
	&dev_attr_timeouts.attr,
	&dev_attr_locality.attr,
	NULL,
};
208
/* Attribute group registered via tpm_vtpm.attr_group. */
static struct attribute_group vtpm_attr_grp = {
	.attrs = vtpm_attrs,
};
212
/* Generous fallback duration for short/medium/long command classes. */
#define TPM_LONG_TIMEOUT (10 * 60 * HZ)

/* Vendor-specific hooks plugged into the common TPM core. */
static const struct tpm_vendor_specific tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	/* a request is complete once both IDLE and RESULT are set */
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
	.attr_group = &vtpm_attr_grp,
	.miscdev = {
		.fops = &vtpm_ops,
	},
	.duration = {
		TPM_LONG_TIMEOUT,
		TPM_LONG_TIMEOUT,
		TPM_LONG_TIMEOUT,
	},
};
233
234static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
235{
236 struct tpm_private *priv = dev_id;
237
238 switch (priv->shr->state) {
239 case VTPM_STATE_IDLE:
240 case VTPM_STATE_FINISH:
241 wake_up_interruptible(&priv->chip->vendor.read_queue);
242 break;
243 case VTPM_STATE_SUBMIT:
244 case VTPM_STATE_CANCEL:
245 default:
246 break;
247 }
248 return IRQ_HANDLED;
249}
250
251static int setup_chip(struct device *dev, struct tpm_private *priv)
252{
253 struct tpm_chip *chip;
254
255 chip = tpm_register_hardware(dev, &tpm_vtpm);
256 if (!chip)
257 return -ENODEV;
258
259 init_waitqueue_head(&chip->vendor.read_queue);
260
261 priv->chip = chip;
262 TPM_VPRIV(chip) = priv;
263
264 return 0;
265}
266
/*
 * Allocate the shared page, grant it to the backend, set up the event
 * channel/IRQ and publish ring-ref and event-channel in xenstore.
 *
 * Caller must clean up (ring_free) in case of errors.
 */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;

	/* one zeroed page carries all request/response traffic */
	priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
	if (rv < 0)
		return rv;

	priv->ring_ref = rv;	/* success value is the grant reference */

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->chip->vendor.irq = rv;	/* success value is the irq number */

 again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			"ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	/* this frontend only speaks protocol v2 */
	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;	/* transaction raced; retry from the start */
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}
344
/*
 * Tear down the shared ring, the event-channel IRQ and the private
 * struct. Safe to call with a partially initialised @priv, as happens
 * on probe error paths.
 */
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		/* also frees the page once the grant is revoked */
		gnttab_end_foreign_access(priv->ring_ref, 0,
				(unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->chip && priv->chip->vendor.irq)
		unbind_from_irqhandler(priv->chip->vendor.irq, priv);

	kfree(priv);
}
361
362static int tpmfront_probe(struct xenbus_device *dev,
363 const struct xenbus_device_id *id)
364{
365 struct tpm_private *priv;
366 int rv;
367
368 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
369 if (!priv) {
370 xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
371 return -ENOMEM;
372 }
373
374 rv = setup_chip(&dev->dev, priv);
375 if (rv) {
376 kfree(priv);
377 return rv;
378 }
379
380 rv = setup_ring(dev, priv);
381 if (rv) {
382 tpm_remove_hardware(&dev->dev);
383 ring_free(priv);
384 return rv;
385 }
386
387 tpm_get_timeouts(priv->chip);
388
Daniel De Graafe2683952013-07-30 13:29:47 -0400389 return rv;
390}
391
/* xenbus remove: unregister the chip and release all frontend resources. */
static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = TPM_VPRIV(chip);
	tpm_remove_hardware(&dev->dev);
	ring_free(priv);
	/* clear the back-pointer so nothing trips over the freed priv */
	TPM_VPRIV(chip) = NULL;
	return 0;
}
401
/* Rebuild the whole connection from scratch after save/restore/migration. */
static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}
408
/*
 * React to backend state changes: finish the handshake once the backend
 * is up (protocol v2 is mandatory), and tear down when it closes.
 */
static void backend_changed(struct xenbus_device *dev,
		enum xenbus_state backend_state)
{
	int val;

	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		/* a missing key reads as 0, i.e. protocol v2 unsupported */
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				"feature-protocol-v2", "%d", &val) < 0)
			val = 0;
		if (!val) {
			xenbus_dev_fatal(dev, -EINVAL,
					"vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}
440
/* xenbus device type this frontend binds to. */
static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");
446
/* xenbus driver glue; expands to the tpmfront_driver definition. */
static DEFINE_XENBUS_DRIVER(tpmfront, ,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
	);
453
454static int __init xen_tpmfront_init(void)
455{
456 if (!xen_domain())
457 return -ENODEV;
458
459 return xenbus_register_frontend(&tpmfront_driver);
460}
461module_init(xen_tpmfront_init);
462
/* Module exit: unregister the xenbus frontend driver. */
static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);
468
469MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
470MODULE_DESCRIPTION("Xen vTPM Driver");
471MODULE_LICENSE("GPL");