/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm, along
 * with the corresponding array of per-page DMA addresses.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
	ttm->dma_address = drm_calloc_large(ttm->num_pages,
					    sizeof(*ttm->dma_address));
}

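/*
 * Frees the page and DMA-address arrays allocated by
 * ttm_tt_alloc_page_directory().
 */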
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm->dma_address);
	ttm->dma_address = NULL;
}

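/*
 * Return the page at @index, allocating it and charging it against the
 * global memory accounting if it is not already present.  Returns NULL
 * on allocation or accounting failure.
 */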
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	if (NULL == (p = ttm->pages[index])) {

		ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
				    &ttm->dma_address[index]);
		if (ret != 0)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		ttm->pages[index] = p;
	}
	return p;
out_err:
	ttm_put_pages(&p, 1, ttm->page_flags,
		      ttm->caching_state, &ttm->dma_address[index]);
	return NULL;
}

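/*
 * As __ttm_tt_get_page(), but first swaps the ttm back in if its pages
 * are currently out on swap storage.
 */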
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

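/*
 * Allocate (or swap back in) all pages backing the ttm and move it to
 * the tt_unbound state, ready to be bound to the GPU.
 */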
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

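/*
 * Change the caching attribute (write-back, write-combined or uncached)
 * of a single page in the linear kernel map.  Only needed on x86; the
 * stub below leaves other architectures untouched.
 */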
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

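/*
 * Translate the caching bits of a placement flag word into a
 * ttm_caching_state and apply it to every page of the ttm.
 */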
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

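/*
 * Return all pages currently backing the ttm to the page allocator and
 * credit them back to the global memory accounting.
 */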
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
				      ttm->caching_state, &ttm->dma_address[i]);
		}
	}
	ttm->state = tt_unpopulated;
}

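/*
 * Tear down a ttm: unbind it if necessary, free its pages and page
 * directory, drop any non-persistent swap storage and finally call the
 * backend destroy function.
 */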
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm_tt_free_alloced_pages(ttm);
		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

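/*
 * Initialize a driver-allocated ttm_tt and allocate its page directory.
 * Returns -ENOMEM if the page or DMA-address arrays cannot be allocated.
 */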
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages || !ttm->dma_address) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

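/*
 * Populate the ttm if necessary and bind it to the memory region
 * described by @bo_mem through the backend bind function.
 */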
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

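/*
 * Copy the ttm's pages back in from its shmem swap storage, then release
 * that storage unless it is persistent.
 */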
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

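/*
 * Copy all populated pages of the ttm out to shmem-backed swap storage
 * (either a newly created object or the caller-supplied persistent one),
 * free the original pages and mark the ttm as swapped out.
 */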
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
Thomas Hellstromba4e7d92009-06-10 15:20:19 +0200431}