blob: 5bef4c6c02838a42911d8142473fd965df1a32f4 [file] [log] [blame]
Lingfeng Yanga50f1db2016-06-13 09:24:07 -07001/*
2 * Copyright (C) 2016 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/fdtable.h>
16#include <linux/file.h>
17#include <linux/init.h>
18#include <linux/miscdevice.h>
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/platform_device.h>
22
23#include <linux/interrupt.h>
24#include <linux/kref.h>
25#include <linux/spinlock.h>
26#include <linux/types.h>
27
28#include <linux/io.h>
29#include <linux/mm.h>
30#include <linux/acpi.h>
31
32#include <linux/string.h>
Lingfeng Yanga50f1db2016-06-13 09:24:07 -070033
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -080034#include <linux/fs.h>
35#include <linux/syscalls.h>
36#include <linux/sync_file.h>
37#include <linux/fence.h>
38
39#include "goldfish_sync_timeline_fence.h"
Lingfeng Yanga50f1db2016-06-13 09:24:07 -070040
/* Logging helpers.
 * Fix: the macro bodies previously ended in a semicolon, so every call
 * site expanded to a double semicolon and a form such as
 * "if (x) ERR(...); else ..." would not parse. The semicolon now comes
 * from the call site only. */
#define ERR(...) printk(KERN_ERR __VA_ARGS__)

#define INFO(...) printk(KERN_INFO __VA_ARGS__)

#define DPRINT(...) pr_debug(__VA_ARGS__)

/* Trace entry into the enclosing function at debug verbosity. */
#define DTRACE() DPRINT("%s: enter", __func__)
48
/* The Goldfish sync driver is designed to provide an interface
 * between the underlying host's sync device and the kernel's
 * fence sync framework.
Lingfeng Yanga50f1db2016-06-13 09:24:07 -070052 * The purpose of the device/driver is to enable lightweight
53 * creation and signaling of timelines and fences
54 * in order to synchronize the guest with host-side graphics events.
55 *
56 * Each time the interrupt trips, the driver
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -080057 * may perform a sync operation.
Lingfeng Yanga50f1db2016-06-13 09:24:07 -070058 */
59
60/* The operations are: */
61
62/* Ready signal - used to mark when irq should lower */
63#define CMD_SYNC_READY 0
64
65/* Create a new timeline. writes timeline handle */
66#define CMD_CREATE_SYNC_TIMELINE 1
67
68/* Create a fence object. reads timeline handle and time argument.
69 * Writes fence fd to the SYNC_REG_HANDLE register. */
70#define CMD_CREATE_SYNC_FENCE 2
71
72/* Increments timeline. reads timeline handle and time argument */
73#define CMD_SYNC_TIMELINE_INC 3
74
75/* Destroys a timeline. reads timeline handle */
76#define CMD_DESTROY_SYNC_TIMELINE 4
77
78/* Starts a wait on the host with
79 * the given glsync object and sync thread handle. */
80#define CMD_TRIGGER_HOST_WAIT 5
81
82/* The register layout is: */
83
84#define SYNC_REG_BATCH_COMMAND 0x00 /* host->guest batch commands */
85#define SYNC_REG_BATCH_GUESTCOMMAND 0x04 /* guest->host batch commands */
86#define SYNC_REG_BATCH_COMMAND_ADDR 0x08 /* communicate physical address of host->guest batch commands */
87#define SYNC_REG_BATCH_COMMAND_ADDR_HIGH 0x0c /* 64-bit part */
88#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR 0x10 /* communicate physical address of guest->host commands */
89#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */
90#define SYNC_REG_INIT 0x18 /* signals that the device has been probed */
91
92/* There is an ioctl associated with goldfish sync driver.
93 * Make it conflict with ioctls that are not likely to be used
94 * in the emulator.
95 *
96 * '@' 00-0F linux/radeonfb.h conflict!
97 * '@' 00-0F drivers/video/aty/aty128fb.c conflict!
98 */
99#define GOLDFISH_SYNC_IOC_MAGIC '@'
100
101#define GOLDFISH_SYNC_IOC_QUEUE_WORK _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)
102
103/* The above definitions (command codes, register layout, ioctl definitions)
104 * need to be in sync with the following files:
105 *
106 * Host-side (emulator):
107 * external/qemu/android/emulation/goldfish_sync.h
108 * external/qemu-android/hw/misc/goldfish_sync.c
109 *
110 * Guest-side (system image):
111 * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
112 * device/generic/goldfish/ueventd.ranchu.rc
113 * platform/build/target/board/generic/sepolicy/file_contexts
114 */
/* One host->guest command. This layout is shared with the emulator-side
 * implementation listed in the file comment above, so it must stay in
 * sync with those sources. */
struct goldfish_sync_hostcmd {
	/* sorted for alignment */
	uint64_t handle;          /* timeline object pointer cast to u64 */
	uint64_t hostcmd_handle;  /* host-side handle echoed back in replies */
	uint32_t cmd;             /* one of the CMD_* codes above */
	uint32_t time_arg;        /* fence target value / timeline increment */
};
122
/* One guest->host command (currently only CMD_TRIGGER_HOST_WAIT).
 * All fields are uint64_t for alignment and to carry pointers/handles. */
struct goldfish_sync_guestcmd {
	uint64_t host_command; /* uint64_t for alignment */
	uint64_t glsync_handle;          /* host GLsync object handle */
	uint64_t thread_handle;          /* host sync thread handle */
	uint64_t guest_timeline_handle;  /* guest timeline obj pointer as u64 */
};
129
Lingfeng Yange4254962017-02-09 07:43:47 -0800130#define GOLDFISH_SYNC_MAX_CMDS 32
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700131
/* Driver-wide state for the (single) goldfish sync device instance. */
struct goldfish_sync_state {
	char __iomem *reg_base;	/* mapped MMIO registers (SYNC_REG_*) */
	int irq;		/* IRQ line, shared with other devices */

	/* Spinlock protects |to_do| / |to_do_end|. */
	spinlock_t lock;
	/* |mutex_lock| protects all concurrent access
	 * to timelines for both kernel and user space. */
	struct mutex mutex_lock;

	/* Buffer holding commands issued from host. */
	struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
	uint32_t to_do_end;	/* number of commands queued in |to_do| */

	/* Addresses for the reading or writing
	 * of individual commands. The host can directly write
	 * to |batch_hostcmd| (and then this driver immediately
	 * copies contents to |to_do|). This driver either replies
	 * through |batch_hostcmd| or simply issues a
	 * guest->host command through |batch_guestcmd|.
	 */
	struct goldfish_sync_hostcmd *batch_hostcmd;
	struct goldfish_sync_guestcmd *batch_guestcmd;

	/* Used to give this struct itself to a work queue
	 * function for executing actual sync commands. */
	struct work_struct work_item;
};
160
/* The single device instance; declared as a one-element array so it can
 * be referred to as a pointer without taking its address explicitly. */
static struct goldfish_sync_state global_sync_state[1];
162
/* Wrapper pairing an underlying sync timeline with the bookkeeping this
 * driver needs (current value and a lifetime-tracking kref). */
struct goldfish_sync_timeline_obj {
	/* Underlying timeline; set to NULL when the object is deleted. */
	struct goldfish_sync_timeline *sync_tl;
	/* Last value the timeline has been signaled up to by this driver. */
	uint32_t current_time;
	/* We need to be careful about when we deallocate
	 * this |goldfish_sync_timeline_obj| struct.
	 * In order to ensure proper cleanup, we need to
	 * consider the triggered host-side wait that may
	 * still be in flight when the guest close()'s a
	 * goldfish_sync device's sync context fd (and
	 * destroys the |sync_tl| field above).
	 * The host-side wait may raise IRQ
	 * and tell the kernel to increment the timeline _after_
	 * the |sync_tl| has already been set to null.
	 *
	 * From observations on OpenGL apps and CTS tests, this
	 * happens at some very low probability upon context
	 * destruction or process close, but it does happen
	 * and it needs to be handled properly. Otherwise,
	 * if we clean up the surrounding |goldfish_sync_timeline_obj|
	 * too early, any |handle| field of any host->guest command
	 * might not even point to a null |sync_tl| field,
	 * but to garbage memory or even a reclaimed |sync_tl|.
	 * If we do not count such "pending waits" and kfree the object
	 * immediately upon |goldfish_sync_timeline_destroy|,
	 * we might get mysterious RCU stalls after running a long
	 * time because the garbage memory that is being read
	 * happens to be interpretable as a |spinlock_t| struct
	 * that is currently in the locked state.
	 *
	 * To track when to free the |goldfish_sync_timeline_obj|
	 * itself, we maintain a kref.
	 * The kref essentially counts the timeline itself plus
	 * the number of waits in flight. kref_init/kref_put
	 * are issued on
	 * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy|
	 * and kref_get/kref_put are issued on
	 * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|.
	 *
	 * The timeline is destroyed after reference count
	 * reaches zero, which would happen after
	 * |goldfish_sync_timeline_destroy| and all pending
	 * |goldfish_sync_timeline_inc|'s are fulfilled.
	 *
	 * NOTE (1): We assume that |fence_create| and
	 * |timeline_inc| calls are 1:1, otherwise the kref scheme
	 * will not work. This is a valid assumption as long
	 * as the host-side virtual device implementation
	 * does not insert any timeline increments
	 * that we did not trigger from here.
	 *
	 * NOTE (2): The use of kref by itself requires no locks,
	 * but this does not mean everything works without locks.
	 * Related timeline operations do require a lock of some sort,
	 * or at least are not proven to work without it.
	 * In particular, we assume that all the operations
	 * done on the |kref| field above are done in contexts where
	 * |global_sync_state->mutex_lock| is held. Do not
	 * remove that lock until everything is proven to work
	 * without it!!! */
	struct kref kref;
};
224
225/* We will call |delete_timeline_obj| when the last reference count
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800226 * of the kref is decremented. This deletes the sync
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700227 * timeline object along with the wrapper itself. */
228static void delete_timeline_obj(struct kref* kref) {
229 struct goldfish_sync_timeline_obj* obj =
230 container_of(kref, struct goldfish_sync_timeline_obj, kref);
231
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800232 goldfish_sync_timeline_put_internal(obj->sync_tl);
233 obj->sync_tl = NULL;
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700234 kfree(obj);
235}
236
/* Monotonic counter used to mint unique timeline/fence names. Callers
 * run under |global_sync_state->mutex_lock| (see the lock comments on
 * the timeline functions), so a plain increment suffices. */
static uint64_t gensym_ctr;

/* Write the next unique name, "goldfish_sync:gensym:<n>", into |dst|.
 * |dst| must be large enough for the result (callers pass 256 bytes). */
static void gensym(char *dst)
{
	sprintf(dst, "goldfish_sync:gensym:%llu",
		(unsigned long long)gensym_ctr++);
}
243
244/* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock|
245 * is held. */
246static struct goldfish_sync_timeline_obj*
247goldfish_sync_timeline_create(void)
248{
249
250 char timeline_name[256];
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800251 struct goldfish_sync_timeline *res_sync_tl = NULL;
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700252 struct goldfish_sync_timeline_obj *res;
253
254 DTRACE();
255
256 gensym(timeline_name);
257
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800258 res_sync_tl = goldfish_sync_timeline_create_internal(timeline_name);
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700259 if (!res_sync_tl) {
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800260 ERR("Failed to create goldfish_sw_sync timeline.");
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700261 return NULL;
262 }
263
264 res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL);
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800265 res->sync_tl = res_sync_tl;
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700266 res->current_time = 0;
267 kref_init(&res->kref);
268
269 DPRINT("new timeline_obj=0x%p", res);
270 return res;
271}
272
/* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock|
 * is held.
 * Creates a sync point at value |val| on |obj|'s timeline, wraps it in a
 * sync_file, and installs that file into a fresh fd.
 * On success, takes an extra reference on |obj| (balanced by the
 * kref_put in |goldfish_sync_timeline_inc|) and returns the fd.
 * Returns -1 on any failure, with the fd and sync point unwound. */
static int
goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj,
				uint32_t val)
{

	int fd;
	char fence_name[256];
	struct sync_pt *syncpt = NULL;
	struct sync_file *sync_file_obj = NULL;
	struct goldfish_sync_timeline *tl;

	DTRACE();

	/* Tolerate a timeline that was never created (or already nuked). */
	if (!obj) return -1;

	tl = obj->sync_tl;

	syncpt = goldfish_sync_pt_create_internal(
				tl, sizeof(struct sync_pt) + 4, val);
	if (!syncpt) {
		ERR("could not create sync point! "
			"goldfish_sync_timeline=0x%p val=%d",
			tl, val);
		return -1;
	}

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ERR("could not get unused fd for sync fence. "
			"errno=%d", fd);
		goto err_cleanup_pt;
	}

	/* NOTE(review): |fence_name| is generated but never consumed after
	 * the switch to the sync_file API — candidate for removal. */
	gensym(fence_name);

	sync_file_obj = sync_file_create(&syncpt->base);
	if (!sync_file_obj) {
		ERR("could not create sync fence! "
			"goldfish_sync_timeline=0x%p val=%d sync_pt=0x%p",
			tl, val, syncpt);
		goto err_cleanup_fd_pt;
	}

	DPRINT("installing sync fence into fd %d sync_file_obj=0x%p",
			fd, sync_file_obj);
	/* After fd_install the fd owns the sync_file; no unwinding past
	 * this point. */
	fd_install(fd, sync_file_obj->file);
	/* Count the pending host wait that will eventually increment the
	 * timeline (see the kref discussion on the struct). */
	kref_get(&obj->kref);

	return fd;

err_cleanup_fd_pt:
	put_unused_fd(fd);
err_cleanup_pt:
	fence_put(&syncpt->base);
	return -1;
}
331
332/* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock|
333 * is held. */
334static void
335goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc)
336{
337 DTRACE();
338 /* Just give up if someone else nuked the timeline.
339 * Whoever it was won't care that it doesn't get signaled. */
340 if (!obj) return;
341
342 DPRINT("timeline_obj=0x%p", obj);
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800343 goldfish_sync_timeline_signal_internal(obj->sync_tl, inc);
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700344 DPRINT("incremented timeline. increment max_time");
345 obj->current_time += inc;
346
347 /* Here, we will end up deleting the timeline object if it
348 * turns out that this call was a pending increment after
349 * |goldfish_sync_timeline_destroy| was called. */
350 kref_put(&obj->kref, delete_timeline_obj);
351 DPRINT("done");
352}
353
354/* |goldfish_sync_timeline_destroy| assumes
355 * that |global_sync_state->mutex_lock| is held. */
356static void
357goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj)
358{
359 DTRACE();
360 /* See description of |goldfish_sync_timeline_obj| for why we
361 * should not immediately destroy |obj| */
362 kref_put(&obj->kref, delete_timeline_obj);
363}
364
365static inline void
366goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
367 uint32_t cmd,
368 uint64_t handle,
369 uint32_t time_arg,
370 uint64_t hostcmd_handle)
371{
372 struct goldfish_sync_hostcmd *to_add;
373
374 DTRACE();
375
376 BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS);
377
378 to_add = &sync_state->to_do[sync_state->to_do_end];
379
380 to_add->cmd = cmd;
381 to_add->handle = handle;
382 to_add->time_arg = time_arg;
383 to_add->hostcmd_handle = hostcmd_handle;
384
385 sync_state->to_do_end += 1;
386}
387
388static inline void
389goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
390 uint32_t cmd,
391 uint64_t handle,
392 uint32_t time_arg,
393 uint64_t hostcmd_handle)
394{
395 unsigned long irq_flags;
396 struct goldfish_sync_hostcmd *batch_hostcmd =
397 sync_state->batch_hostcmd;
398
399 DTRACE();
400
401 spin_lock_irqsave(&sync_state->lock, irq_flags);
402
403 batch_hostcmd->cmd = cmd;
404 batch_hostcmd->handle = handle;
405 batch_hostcmd->time_arg = time_arg;
406 batch_hostcmd->hostcmd_handle = hostcmd_handle;
407 writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
408
409 spin_unlock_irqrestore(&sync_state->lock, irq_flags);
410}
411
412static inline void
413goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
414 uint32_t cmd,
415 uint64_t glsync_handle,
416 uint64_t thread_handle,
417 uint64_t timeline_handle)
418{
419 unsigned long irq_flags;
420 struct goldfish_sync_guestcmd *batch_guestcmd =
421 sync_state->batch_guestcmd;
422
423 DTRACE();
424
425 spin_lock_irqsave(&sync_state->lock, irq_flags);
426
427 batch_guestcmd->host_command = (uint64_t)cmd;
428 batch_guestcmd->glsync_handle = (uint64_t)glsync_handle;
429 batch_guestcmd->thread_handle = (uint64_t)thread_handle;
430 batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle;
431 writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);
432
433 spin_unlock_irqrestore(&sync_state->lock, irq_flags);
434}
435
436/* |goldfish_sync_interrupt| handles IRQ raises from the virtual device.
437 * In the context of OpenGL, this interrupt will fire whenever we need
438 * to signal a fence fd in the guest, with the command
439 * |CMD_SYNC_TIMELINE_INC|.
440 * However, because this function will be called in an interrupt context,
441 * it is necessary to do the actual work of signaling off of interrupt context.
442 * The shared work queue is used for this purpose. At the end when
443 * all pending commands are intercepted by the interrupt handler,
444 * we call |schedule_work|, which will later run the actual
445 * desired sync command in |goldfish_sync_work_item_fn|.
446 */
447static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
448{
449
450 struct goldfish_sync_state *sync_state = dev_id;
451
452 uint32_t nextcmd;
453 uint32_t command_r;
454 uint64_t handle_rw;
455 uint32_t time_r;
456 uint64_t hostcmd_handle_rw;
457
458 int count = 0;
459
460 DTRACE();
461
462 sync_state = dev_id;
463
464 spin_lock(&sync_state->lock);
465
466 for (;;) {
467
468 readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
469 nextcmd = sync_state->batch_hostcmd->cmd;
470
471 if (nextcmd == 0)
472 break;
473
474 command_r = nextcmd;
475 handle_rw = sync_state->batch_hostcmd->handle;
476 time_r = sync_state->batch_hostcmd->time_arg;
477 hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle;
478
479 goldfish_sync_cmd_queue(
480 sync_state,
481 command_r,
482 handle_rw,
483 time_r,
484 hostcmd_handle_rw);
485
486 count++;
487 }
488
489 spin_unlock(&sync_state->lock);
490
491 schedule_work(&sync_state->work_item);
492
493 return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
494}
495
/* |goldfish_sync_work_item_fn| does the actual work of servicing
 * host->guest sync commands. This function is triggered whenever
 * the IRQ for the goldfish sync device is raised. Once it starts
 * running, it grabs the contents of the buffer containing the
 * commands it needs to execute (there may be multiple, because
 * our IRQ is active high and not edge triggered), and then
 * runs all of them one after the other.
 */
static void goldfish_sync_work_item_fn(struct work_struct *input)
{

	struct goldfish_sync_state *sync_state;
	int sync_fence_fd;

	struct goldfish_sync_timeline_obj *timeline;
	uint64_t timeline_ptr;

	uint64_t hostcmd_handle;

	uint32_t cmd;
	uint64_t handle;
	uint32_t time_arg;

	struct goldfish_sync_hostcmd *todo;
	uint32_t todo_end;

	unsigned long irq_flags;

	/* Local snapshot of |to_do| so the spinlock can be released
	 * before running the command handlers (which may sleep). */
	struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS];
	uint32_t i = 0;

	sync_state = container_of(input, struct goldfish_sync_state, work_item);

	/* Serializes all timeline access against the ioctl path. */
	mutex_lock(&sync_state->mutex_lock);

	/* Drain |to_do| under the spinlock shared with the IRQ handler. */
	spin_lock_irqsave(&sync_state->lock, irq_flags); {

		todo_end = sync_state->to_do_end;

		DPRINT("num sync todos: %u", sync_state->to_do_end);

		for (i = 0; i < todo_end; i++)
			to_run[i] = sync_state->to_do[i];

		/* We expect that commands will come in at a slow enough rate
		 * so that incoming items will not be more than
		 * GOLDFISH_SYNC_MAX_CMDS.
		 *
		 * This is because the way the sync device is used,
		 * it's only for managing buffer data transfers per frame,
		 * with a sequential dependency between putting things in
		 * to_do and taking them out. Once a set of commands is
		 * queued up in to_do, the user of the device waits for
		 * them to be processed before queuing additional commands,
		 * which limits the rate at which commands come in
		 * to the rate at which we take them out here.
		 *
		 * We also don't expect more than MAX_CMDS to be issued
		 * at once; there is a correspondence between
		 * which buffers need swapping to the (display / buffer queue)
		 * to particular commands, and we don't expect there to be
		 * enough display or buffer queues in operation at once
		 * to overrun GOLDFISH_SYNC_MAX_CMDS.
		 */
		sync_state->to_do_end = 0;

	} spin_unlock_irqrestore(&sync_state->lock, irq_flags);

	/* Execute the snapshot outside the spinlock (mutex still held). */
	for (i = 0; i < todo_end; i++) {
		DPRINT("todo index: %u", i);

		todo = &to_run[i];

		cmd = todo->cmd;

		handle = (uint64_t)todo->handle;
		time_arg = todo->time_arg;
		hostcmd_handle = (uint64_t)todo->hostcmd_handle;

		DTRACE();

		/* |handle| carries a pointer to a timeline object that was
		 * previously handed to the host (see CREATE reply below). */
		timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle;

		switch (cmd) {
		case CMD_SYNC_READY:
			break;
		case CMD_CREATE_SYNC_TIMELINE:
			DPRINT("exec CMD_CREATE_SYNC_TIMELINE: "
					"handle=0x%llx time_arg=%d",
					handle, time_arg);
			timeline = goldfish_sync_timeline_create();
			/* Reply with the new object's address as the handle
			 * the host will use in subsequent commands. */
			timeline_ptr = (uintptr_t)timeline;
			goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE,
							timeline_ptr,
							0,
							hostcmd_handle);
			DPRINT("sync timeline created: %p", timeline);
			break;
		case CMD_CREATE_SYNC_FENCE:
			DPRINT("exec CMD_CREATE_SYNC_FENCE: "
					"handle=0x%llx time_arg=%d",
					handle, time_arg);
			sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg);
			goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE,
							sync_fence_fd,
							0,
							hostcmd_handle);
			break;
		case CMD_SYNC_TIMELINE_INC:
			DPRINT("exec CMD_SYNC_TIMELINE_INC: "
					"handle=0x%llx time_arg=%d",
					handle, time_arg);
			goldfish_sync_timeline_inc(timeline, time_arg);
			break;
		case CMD_DESTROY_SYNC_TIMELINE:
			DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: "
					"handle=0x%llx time_arg=%d",
					handle, time_arg);
			goldfish_sync_timeline_destroy(timeline);
			break;
		}
		DPRINT("Done executing sync command");
	}
	mutex_unlock(&sync_state->mutex_lock);
}
621
622/* Guest-side interface: file operations */
623
624/* Goldfish sync context and ioctl info.
625 *
626 * When a sync context is created by open()-ing the goldfish sync device, we
627 * create a sync context (|goldfish_sync_context|).
628 *
629 * Currently, the only data required to track is the sync timeline itself
630 * along with the current time, which are all packed up in the
631 * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context|
632 * as the filp->private_data.
633 *
634 * Next, when a sync context user requests that work be queued and a fence
635 * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds
636 * information about which host handles to touch for this particular
637 * queue-work operation. We need to know about the host-side sync thread
638 * and the particular host-side GLsync object. We also possibly write out
639 * a file descriptor.
640 */
/* Per-open() state: just the timeline for this sync context, created
 * lazily on the first queue-work ioctl (see |goldfish_sync_ioctl|). */
struct goldfish_sync_context {
	struct goldfish_sync_timeline_obj *timeline;
};

/* ioctl payload for GOLDFISH_SYNC_IOC_QUEUE_WORK; presumably mirrored
 * by the guest-side user library listed in the file comment above. */
struct goldfish_sync_ioctl_info {
	uint64_t host_glsync_handle_in;      /* host GLsync object handle */
	uint64_t host_syncthread_handle_in;  /* host sync thread handle */
	int fence_fd_out;                    /* written back: new fence fd */
};
650
651static int goldfish_sync_open(struct inode *inode, struct file *file)
652{
653
654 struct goldfish_sync_context *sync_context;
655
656 DTRACE();
657
658 mutex_lock(&global_sync_state->mutex_lock);
659
660 sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL);
661
662 if (sync_context == NULL) {
663 ERR("Creation of goldfish sync context failed!");
664 mutex_unlock(&global_sync_state->mutex_lock);
665 return -ENOMEM;
666 }
667
668 sync_context->timeline = NULL;
669
670 file->private_data = sync_context;
671
672 DPRINT("successfully create a sync context @0x%p", sync_context);
673
674 mutex_unlock(&global_sync_state->mutex_lock);
675
676 return 0;
677}
678
679static int goldfish_sync_release(struct inode *inode, struct file *file)
680{
681
682 struct goldfish_sync_context *sync_context;
683
684 DTRACE();
685
686 mutex_lock(&global_sync_state->mutex_lock);
687
688 sync_context = file->private_data;
689
690 if (sync_context->timeline)
691 goldfish_sync_timeline_destroy(sync_context->timeline);
692
693 sync_context->timeline = NULL;
694
695 kfree(sync_context);
696
697 mutex_unlock(&global_sync_state->mutex_lock);
698
699 return 0;
700}
701
702/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync
703 * and is used in conjunction with eglCreateSyncKHR to queue up the
704 * actual work of waiting for the EGL sync command to complete,
705 * possibly returning a fence fd to the guest.
706 */
707static long goldfish_sync_ioctl(struct file *file,
708 unsigned int cmd,
709 unsigned long arg)
710{
711 struct goldfish_sync_context *sync_context_data;
712 struct goldfish_sync_timeline_obj *timeline;
713 int fd_out;
714 struct goldfish_sync_ioctl_info ioctl_data;
715
716 DTRACE();
717
718 sync_context_data = file->private_data;
719 fd_out = -1;
720
721 switch (cmd) {
722 case GOLDFISH_SYNC_IOC_QUEUE_WORK:
723
724 DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK");
725
726 mutex_lock(&global_sync_state->mutex_lock);
727
728 if (copy_from_user(&ioctl_data,
729 (void __user *)arg,
730 sizeof(ioctl_data))) {
731 ERR("Failed to copy memory for ioctl_data from user.");
732 mutex_unlock(&global_sync_state->mutex_lock);
733 return -EFAULT;
734 }
735
736 if (ioctl_data.host_syncthread_handle_in == 0) {
737 DPRINT("Error: zero host syncthread handle!!!");
738 mutex_unlock(&global_sync_state->mutex_lock);
739 return -EFAULT;
740 }
741
742 if (!sync_context_data->timeline) {
743 DPRINT("no timeline yet, create one.");
744 sync_context_data->timeline = goldfish_sync_timeline_create();
745 DPRINT("timeline: 0x%p", &sync_context_data->timeline);
746 }
747
748 timeline = sync_context_data->timeline;
749 fd_out = goldfish_sync_fence_create(timeline,
750 timeline->current_time + 1);
751 DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)",
752 fd_out,
753 sync_context_data->timeline->current_time + 1,
754 sync_context_data->timeline);
755
756 ioctl_data.fence_fd_out = fd_out;
757
758 if (copy_to_user((void __user *)arg,
759 &ioctl_data,
760 sizeof(ioctl_data))) {
761 DPRINT("Error, could not copy to user!!!");
762
763 sys_close(fd_out);
764 /* We won't be doing an increment, kref_put immediately. */
765 kref_put(&timeline->kref, delete_timeline_obj);
766 mutex_unlock(&global_sync_state->mutex_lock);
767 return -EFAULT;
768 }
769
770 /* We are now about to trigger a host-side wait;
771 * accumulate on |pending_waits|. */
772 goldfish_sync_send_guestcmd(global_sync_state,
773 CMD_TRIGGER_HOST_WAIT,
774 ioctl_data.host_glsync_handle_in,
775 ioctl_data.host_syncthread_handle_in,
776 (uint64_t)(uintptr_t)(sync_context_data->timeline));
777
778 mutex_unlock(&global_sync_state->mutex_lock);
779 return 0;
780 default:
781 return -ENOTTY;
782 }
783}
784
/* File operations for /dev/goldfish_sync. The same ioctl handler serves
 * 32-bit compat callers (the payload layout is arch-independent). */
static const struct file_operations goldfish_sync_fops = {
	.owner = THIS_MODULE,
	.open = goldfish_sync_open,
	.release = goldfish_sync_release,
	.unlocked_ioctl = goldfish_sync_ioctl,
	.compat_ioctl = goldfish_sync_ioctl,
};
792
793static struct miscdevice goldfish_sync_device = {
794 .name = "goldfish_sync",
795 .fops = &goldfish_sync_fops,
796};
797
798
799static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state,
800 void *batch_addr,
801 uint32_t addr_offset,
802 uint32_t addr_offset_high)
803{
804 uint64_t batch_addr_phys;
805 uint32_t batch_addr_phys_test_lo;
806 uint32_t batch_addr_phys_test_hi;
807
808 if (!batch_addr) {
809 ERR("Could not use batch command address!");
810 return false;
811 }
812
813 batch_addr_phys = virt_to_phys(batch_addr);
814 writel((uint32_t)(batch_addr_phys),
815 sync_state->reg_base + addr_offset);
816 writel((uint32_t)(batch_addr_phys >> 32),
817 sync_state->reg_base + addr_offset_high);
818
819 batch_addr_phys_test_lo =
820 readl(sync_state->reg_base + addr_offset);
821 batch_addr_phys_test_hi =
822 readl(sync_state->reg_base + addr_offset_high);
823
824 if (virt_to_phys(batch_addr) !=
825 (((uint64_t)batch_addr_phys_test_hi << 32) |
826 batch_addr_phys_test_lo)) {
827 ERR("Invalid batch command address!");
828 return false;
829 }
830
831 return true;
832}
833
834int goldfish_sync_probe(struct platform_device *pdev)
835{
836 struct resource *ioresource;
837 struct goldfish_sync_state *sync_state = global_sync_state;
838 int status;
839
840 DTRACE();
841
842 sync_state->to_do_end = 0;
843
844 spin_lock_init(&sync_state->lock);
845 mutex_init(&sync_state->mutex_lock);
846
847 platform_set_drvdata(pdev, sync_state);
848
849 ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
850 if (ioresource == NULL) {
851 ERR("platform_get_resource failed");
852 return -ENODEV;
853 }
854
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800855 sync_state->reg_base =
856 devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700857 if (sync_state->reg_base == NULL) {
858 ERR("Could not ioremap");
859 return -ENOMEM;
860 }
861
862 sync_state->irq = platform_get_irq(pdev, 0);
863 if (sync_state->irq < 0) {
864 ERR("Could not platform_get_irq");
865 return -ENODEV;
866 }
867
868 status = devm_request_irq(&pdev->dev,
869 sync_state->irq,
870 goldfish_sync_interrupt,
871 IRQF_SHARED,
872 pdev->name,
873 sync_state);
874 if (status) {
875 ERR("request_irq failed");
876 return -ENODEV;
877 }
878
879 INIT_WORK(&sync_state->work_item,
880 goldfish_sync_work_item_fn);
881
882 misc_register(&goldfish_sync_device);
883
884 /* Obtain addresses for batch send/recv of commands. */
885 {
886 struct goldfish_sync_hostcmd *batch_addr_hostcmd;
887 struct goldfish_sync_guestcmd *batch_addr_guestcmd;
888
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800889 batch_addr_hostcmd =
890 devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd),
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700891 GFP_KERNEL);
Lingfeng Yang1cbd16b2017-01-31 23:28:39 -0800892 batch_addr_guestcmd =
893 devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd),
Lingfeng Yanga50f1db2016-06-13 09:24:07 -0700894 GFP_KERNEL);
895
896 if (!setup_verify_batch_cmd_addr(sync_state,
897 batch_addr_hostcmd,
898 SYNC_REG_BATCH_COMMAND_ADDR,
899 SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) {
900 ERR("goldfish_sync: Could not setup batch command address");
901 return -ENODEV;
902 }
903
904 if (!setup_verify_batch_cmd_addr(sync_state,
905 batch_addr_guestcmd,
906 SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
907 SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) {
908 ERR("goldfish_sync: Could not setup batch guest command address");
909 return -ENODEV;
910 }
911
912 sync_state->batch_hostcmd = batch_addr_hostcmd;
913 sync_state->batch_guestcmd = batch_addr_guestcmd;
914 }
915
916 INFO("goldfish_sync: Initialized goldfish sync device");
917
918 writel(0, sync_state->reg_base + SYNC_REG_INIT);
919
920 return 0;
921}
922
923static int goldfish_sync_remove(struct platform_device *pdev)
924{
925 struct goldfish_sync_state *sync_state = global_sync_state;
926
927 DTRACE();
928
929 misc_deregister(&goldfish_sync_device);
930 memset(sync_state, 0, sizeof(struct goldfish_sync_state));
931 return 0;
932}
933
/* Device-tree binding: "google,goldfish-sync". */
static const struct of_device_id goldfish_sync_of_match[] = {
	{ .compatible = "google,goldfish-sync", },
	{},
};
MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);

/* ACPI binding: the emulator exposes the device with HID GFSH0006. */
static const struct acpi_device_id goldfish_sync_acpi_match[] = {
	{ "GFSH0006", 0 },
	{ },
};

MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);

/* Platform driver glue; probe/remove are defined above. */
static struct platform_driver goldfish_sync = {
	.probe = goldfish_sync_probe,
	.remove = goldfish_sync_remove,
	.driver = {
		.name = "goldfish_sync",
		.of_match_table = goldfish_sync_of_match,
		.acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
	}
};
956
957module_platform_driver(goldfish_sync);
958
959MODULE_AUTHOR("Google, Inc.");
960MODULE_DESCRIPTION("Android QEMU Sync Driver");
961MODULE_LICENSE("GPL");
962MODULE_VERSION("1.0");