/*
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* This source file contains the implementation of a special device driver
 * that intends to provide a *very* fast communication channel between the
 * guest system and the QEMU emulator.
 *
 * Usage from the guest is simply the following (error handling simplified):
 *
 *    int fd = open("/dev/qemu_pipe", O_RDWR);
 *    .... write() or read() through the pipe.
 *
 * This driver doesn't deal with the exact protocol used during the session.
 * It is intended to be as simple as something like:
 *
 *    // do this _just_ after opening the fd to connect to a specific
 *    // emulator service.
 *    const char *msg = "<pipename>";
 *    if (write(fd, msg, strlen(msg) + 1) < 0) {
 *        ... could not connect to <pipename> service
 *        close(fd);
 *    }
 *
 *    // after this, simply read() and write() to communicate with the
 *    // service. Exact protocol details are left as an exercise to the reader.
 *
 * This driver is very fast because it doesn't copy any data through
 * intermediate buffers, since the emulator is capable of translating
 * guest user addresses into host ones.
 *
 * Note that we must however ensure that each user page involved in the
 * exchange is properly mapped during a transfer.
 */
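
/* As a slightly fuller illustration of the above, here is a minimal sketch
 * of a user-space client (not part of this driver). The service name
 * "graphics" is purely hypothetical; any string understood by the
 * emulator's pipe service dispatcher could be used instead.
 *
 *    #include <fcntl.h>
 *    #include <string.h>
 *    #include <unistd.h>
 *
 *    int open_pipe_service(const char *name)
 *    {
 *        int fd = open("/dev/qemu_pipe", O_RDWR);
 *        if (fd < 0)
 *            return -1;
 *        // Handshake: send the service name, including the terminating
 *        // NUL, immediately after opening the device.
 *        if (write(fd, name, strlen(name) + 1) < 0) {
 *            close(fd);
 *            return -1;
 *        }
 *        return fd;   // read()/write() now speak the service protocol
 *    }
 *
 *    // e.g.  int fd = open_pipe_service("graphics");
 */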

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>

/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* pipe device registers */
#define PIPE_REG_COMMAND		0x00	/* write: value = command */
#define PIPE_REG_STATUS			0x04	/* read */
#define PIPE_REG_CHANNEL		0x08	/* read/write: channel id */
#ifdef CONFIG_64BIT
#define PIPE_REG_CHANNEL_HIGH		0x30	/* read/write: channel id */
#endif
#define PIPE_REG_SIZE			0x0c	/* read/write: buffer size */
#define PIPE_REG_ADDRESS		0x10	/* write: physical address */
#ifdef CONFIG_64BIT
#define PIPE_REG_ADDRESS_HIGH		0x34	/* write: physical address */
#endif
#define PIPE_REG_WAKES			0x14	/* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW	0x18	/* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH	0x1c	/* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS		0x20	/* write: batch access */

/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN		1	/* open new channel */
#define CMD_CLOSE		2	/* close channel (from guest) */
#define CMD_POLL		3	/* poll read/write status */

/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN		(1 << 0)
#define PIPE_POLL_OUT		(1 << 1)
#define PIPE_POLL_HUP		(1 << 2)

/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER	4	/* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE	5	/* tell the emulator to wake us when writing
					 * is possible */

/* The following commands are related to read operations. They must be
 * listed in the same order as the corresponding write ones, since we
 * use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a shared offset in
 * goldfish_pipe_read_write() below (the same offset also maps
 * CMD_WAKE_ON_WRITE to CMD_WAKE_ON_READ).
 */
#define CMD_READ_BUFFER		6	/* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ	7	/* tell the emulator to wake us when reading
					 * is possible */

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL	-1
#define PIPE_ERROR_AGAIN	-2
#define PIPE_ERROR_NOMEM	-3
#define PIPE_ERROR_IO		-4

/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED	(1 << 0)	/* emulator closed pipe */
#define PIPE_WAKE_READ		(1 << 1)	/* pipe can now be read from */
#define PIPE_WAKE_WRITE		(1 << 2)	/* pipe can now be written to */

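/* For reference, the per-page transfer performed by
 * goldfish_pipe_read_write() when the batch mechanism is unavailable
 * boils down to the following illustrative register sequence (a sketch
 * only; the real code below also handles CONFIG_64BIT, error statuses
 * and wake handling):
 *
 *    writel((u32)pipe, base + PIPE_REG_CHANNEL);   // select the channel
 *    writel(avail,     base + PIPE_REG_SIZE);      // bytes in this page
 *    writel(address,   base + PIPE_REG_ADDRESS);   // buffer address for this chunk
 *    writel(CMD_WRITE_BUFFER, base + PIPE_REG_COMMAND);
 *    status = readl(base + PIPE_REG_STATUS);       // > 0: bytes moved,
 *                                                  // 0: EOF, < 0: PIPE_ERROR_*
 */
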
struct access_params {
	unsigned long channel;
	u32 size;
	unsigned long address;
	u32 cmd;
	u32 result;
	/* reserved for future extension */
	u32 flags;
};

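/* A sketch of how the batch ("access params") mechanism is used, assuming
 * the emulator supports it (see setup_access_params_addr() and
 * access_with_param() below): the guest registers the physical address of
 * one struct access_params at probe time, then for each transfer it fills
 * the struct and pokes a single register instead of four:
 *
 *    aps->channel = (unsigned long)pipe;
 *    aps->address = address;
 *    aps->size    = avail;
 *    aps->cmd     = CMD_WRITE_BUFFER;              // or CMD_READ_BUFFER
 *    writel(CMD_WRITE_BUFFER, base + PIPE_REG_ACCESS_PARAMS);
 *    status = aps->result;                         // filled in by the emulator
 */
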
/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	spinlock_t lock;
	unsigned char __iomem *base;
	struct access_params *aps;
	int irq;
};

static struct goldfish_pipe_dev pipe_dev[1];

/* This data type models a given pipe instance */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;
	struct mutex lock;
	unsigned long flags;
	wait_queue_head_t wake_queue;
};


/* Bit flags for the 'flags' field */
enum {
	BIT_CLOSED_ON_HOST = 0,	/* pipe closed by host */
	BIT_WAKE_ON_WRITE  = 1,	/* want to be woken on writes */
	BIT_WAKE_ON_READ   = 2,	/* want to be woken on reads */
};


static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	u32 status;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
#ifdef CONFIG_64BIT
	writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
#endif
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	status = readl(dev->base + PIPE_REG_STATUS);
	spin_unlock_irqrestore(&dev->lock, flags);
	return status;
}

static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
{
	unsigned long flags;
	struct goldfish_pipe_dev *dev = pipe->dev;

	spin_lock_irqsave(&dev->lock, flags);
	writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
#ifdef CONFIG_64BIT
	writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
#endif
	writel(cmd, dev->base + PIPE_REG_COMMAND);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* This function converts an error code returned by the emulator through
 * the PIPE_REG_STATUS i/o register into a valid negative errno value.
 */
static int goldfish_pipe_error_convert(int status)
{
	switch (status) {
	case PIPE_ERROR_AGAIN:
		return -EAGAIN;
	case PIPE_ERROR_NOMEM:
		return -ENOMEM;
	case PIPE_ERROR_IO:
		return -EIO;
	default:
		return -EINVAL;
	}
}

/*
 * Note: QEMU returns 0 for any access to an unknown register, which is how
 * we detect whether the access_params (batch) mechanism is supported.
 */
static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
				  struct access_params *aps)
{
	u32 aph, apl;
	u64 paddr;

	aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	paddr = ((u64)aph << 32) | apl;
	if (paddr != (__pa(aps)))
		return 0;
	return 1;
}

/* 0 on success */
static int setup_access_params_addr(struct platform_device *pdev,
				    struct goldfish_pipe_dev *dev)
{
	u64 paddr;
	struct access_params *aps;

	aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
	if (!aps)
		return -1;

	/* FIXME */
	paddr = __pa(aps);
	writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
	writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);

	if (valid_batchbuffer_addr(dev, aps)) {
		dev->aps = aps;
		return 0;
	} else
		return -1;
}

/* A value that will not be set by the QEMU emulator */
#define INITIAL_BATCH_RESULT	(0xdeadbeaf)

static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
			     unsigned long address, unsigned long avail,
			     struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (aps == NULL)
		return -1;

	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
	/*
	 * If aps->result has not changed, the batch command failed.
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -1;
	*status = aps->result;
	return 0;
}

/* This function is used for both reading from and writing to a given
 * pipe.
 */
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
					size_t bufflen, int is_write)
{
	unsigned long irq_flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;
	const int cmd_offset = is_write ? 0
				: (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
	unsigned long address, address_end;
	int ret = 0;

	/* If the emulator already closed the pipe, no need to go further */
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		return -EIO;

	/* Null reads or writes succeed */
	if (unlikely(bufflen == 0))
		return 0;

	/* Check the buffer range for access */
	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
		       buffer, bufflen))
		return -EFAULT;

	/* Serialize access to the pipe */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	address = (unsigned long)(void *)buffer;
	address_end = address + bufflen;

	while (address < address_end) {
		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
		unsigned long next = page_end < address_end ? page_end
							    : address_end;
		unsigned long avail = next - address;
		int status, wakeBit;

		/* Ensure that the corresponding page is properly mapped */
		/* FIXME: this isn't safe or sufficient - use get_user_pages */
		if (is_write) {
			char c;

			/* Ensure that the page is mapped and readable */
			if (__get_user(c, (char __user *)address)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
		} else {
			/* Ensure that the page is mapped and writable */
			if (__put_user(0, (char __user *)address)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
		}

		/* Now, try to transfer the bytes in the current page */
		spin_lock_irqsave(&dev->lock, irq_flags);
		if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
				      address, avail, pipe, &status)) {
			writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
#ifdef CONFIG_64BIT
			writel((u32)((u64)pipe >> 32),
			       dev->base + PIPE_REG_CHANNEL_HIGH);
#endif
			writel(avail, dev->base + PIPE_REG_SIZE);
			writel(address, dev->base + PIPE_REG_ADDRESS);
#ifdef CONFIG_64BIT
			writel((u32)((u64)address >> 32),
			       dev->base + PIPE_REG_ADDRESS_HIGH);
#endif
			writel(CMD_WRITE_BUFFER + cmd_offset,
			       dev->base + PIPE_REG_COMMAND);
			status = readl(dev->base + PIPE_REG_STATUS);
		}
		spin_unlock_irqrestore(&dev->lock, irq_flags);

		if (status > 0) { /* Correct transfer */
			ret += status;
			address += status;
			continue;
		}

		if (status == 0) /* EOF */
			break;

		/* An error occurred. If we already transferred data, just
		 * return with its count. We expect the next call to return
		 * an error code. */
		if (ret > 0)
			break;

		/* If the error is not PIPE_ERROR_AGAIN, or if we are in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
		    (filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/* We will have to wait until more data/space is available.
		 * First, mark the pipe as waiting for a specific wake signal.
		 */
		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
		set_bit(wakeBit, &pipe->flags);

		/* Tell the emulator we're going to wait for a wake event */
		goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);

		/* Unlock the pipe, then wait for the wake signal */
		mutex_unlock(&pipe->lock);

		while (test_bit(wakeBit, &pipe->flags)) {
			if (wait_event_interruptible(
					pipe->wake_queue,
					!test_bit(wakeBit, &pipe->flags)))
				return -ERESTARTSYS;

			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
				return -EIO;
		}

		/* Try to re-acquire the lock */
		if (mutex_lock_interruptible(&pipe->lock))
			return -ERESTARTSYS;

		/* Try the transfer again */
		continue;
	}
	mutex_unlock(&pipe->lock);
	return ret;
}

static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}

static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
					bufflen, 1);
}


static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
	struct goldfish_pipe *pipe = filp->private_data;
	unsigned int mask = 0;
	int status;

	mutex_lock(&pipe->lock);

	poll_wait(filp, &pipe->wake_queue, wait);

	status = goldfish_cmd_status(pipe, CMD_POLL);

	mutex_unlock(&pipe->lock);

	if (status & PIPE_POLL_IN)
		mask |= POLLIN | POLLRDNORM;

	if (status & PIPE_POLL_OUT)
		mask |= POLLOUT | POLLWRNORM;

	if (status & PIPE_POLL_HUP)
		mask |= POLLHUP;

	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		mask |= POLLERR;

	return mask;
}

static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/* We're going to read from the emulator a list of (channel,flags)
	 * pairs corresponding to the wake events that occurred on each
	 * blocked pipe (i.e. channel).
	 */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = 0;

#ifdef CONFIG_64BIT
		channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;

		if (channel == 0)
			break;
#endif
		channel |= readl(dev->base + PIPE_REG_CHANNEL);

		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just close a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}

/**
 * goldfish_pipe_open - open a channel to the AVD
 * @inode: inode of device
 * @file: file struct of opener
 *
 * Create a new pipe link between the emulator and the user application.
 * Each new request produces a new pipe.
 *
 * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need to revisit
 * this addressing.
 */
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
	struct goldfish_pipe *pipe;
	struct goldfish_pipe_dev *dev = pipe_dev;
	int32_t status;

	/* Allocate new pipe kernel object */
	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (pipe == NULL)
		return -ENOMEM;

	pipe->dev = dev;
	mutex_init(&pipe->lock);
	init_waitqueue_head(&pipe->wake_queue);

	/*
	 * Now, tell the emulator we're opening a new pipe. We use the
	 * pipe object's address as the channel identifier for simplicity.
	 */

	status = goldfish_cmd_status(pipe, CMD_OPEN);
	if (status < 0) {
		kfree(pipe);
		return status;
	}

	/* All is done, save the pipe into the file's private data field */
	file->private_data = pipe;
	return 0;
}

static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
	struct goldfish_pipe *pipe = filp->private_data;

	/* The guest is closing the channel, so tell the emulator right now */
	goldfish_cmd(pipe, CMD_CLOSE);
	kfree(pipe);
	filp->private_data = NULL;
	return 0;
}

static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

static struct miscdevice goldfish_pipe_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};

static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = pipe_dev;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
			       IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ\n");
		goto error;
	}

	err = misc_register(&goldfish_pipe_device);
	if (err) {
		dev_err(&pdev->dev, "unable to register device\n");
		goto error;
	}
	setup_access_params_addr(pdev, dev);
	return 0;

error:
	dev->base = NULL;
	return err;
}

static int goldfish_pipe_remove(struct platform_device *pdev)
{
	struct goldfish_pipe_dev *dev = pipe_dev;

	misc_deregister(&goldfish_pipe_device);
	dev->base = NULL;
	return 0;
}

static struct platform_driver goldfish_pipe = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe"
	}
};

module_platform_driver(goldfish_pipe);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");