nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 3 | * Copyright 2015 gRPC authors. |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 4 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 8 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 10 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 16 | * |
| 17 | */ |
| 18 | |
Nicolas "Pixel" Noble | d51d121 | 2016-01-31 11:33:19 +0100 | [diff] [blame] | 19 | #include <ruby/ruby.h> |
Nicolas "Pixel" Noble | 9fcdc87 | 2016-05-05 06:15:34 +0200 | [diff] [blame] | 20 | |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 21 | #include "rb_completion_queue.h" |
Craig Tiller | 5b1c5f2 | 2017-04-19 09:52:18 -0700 | [diff] [blame] | 22 | #include "rb_grpc_imports.generated.h" |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 23 | |
Yuki Yugui Sonoda | 2288791 | 2015-04-16 20:57:17 +0900 | [diff] [blame] | 24 | #include <ruby/thread.h> |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 25 | |
| 26 | #include <grpc/grpc.h> |
murgatroid99 | 9acc40f | 2016-06-30 13:54:09 -0700 | [diff] [blame] | 27 | #include <grpc/support/log.h> |
Craig Tiller | 5b1c5f2 | 2017-04-19 09:52:18 -0700 | [diff] [blame] | 28 | #include <grpc/support/time.h> |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 29 | #include "rb_grpc.h" |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 30 | |
/* Used to allow grpc_completion_queue_next call to release the GIL */
typedef struct next_call_stack {
  grpc_completion_queue* cq;  /* queue being plucked from (not owned) */
  grpc_event event;           /* result slot filled in by the pluck loop */
  gpr_timespec timeout;       /* caller's overall deadline for the pluck */
  void* tag;                  /* completion tag to pluck for */
  /* Set to 1 by the GVL unblock callback to ask the pluck loop to stop.
     NOTE(review): volatile is not a true synchronization primitive; this
     relies on the flag being a plain int written/read in one process —
     confirm against Ruby's unblock-function contract. */
  volatile int interrupted;
} next_call_stack;
| 39 | |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 40 | /* Calls grpc_completion_queue_pluck without holding the ruby GIL */ |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 41 | static void* grpc_rb_completion_queue_pluck_no_gil(void* param) { |
| 42 | next_call_stack* const next_call = (next_call_stack*)param; |
murgatroid99 | c0ecedb | 2016-05-16 16:14:52 -0700 | [diff] [blame] | 43 | gpr_timespec increment = gpr_time_from_millis(20, GPR_TIMESPAN); |
| 44 | gpr_timespec deadline; |
| 45 | do { |
| 46 | deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), increment); |
Craig Tiller | 5b1c5f2 | 2017-04-19 09:52:18 -0700 | [diff] [blame] | 47 | next_call->event = grpc_completion_queue_pluck( |
| 48 | next_call->cq, next_call->tag, deadline, NULL); |
murgatroid99 | e68a715 | 2016-05-18 09:46:26 -0700 | [diff] [blame] | 49 | if (next_call->event.type != GRPC_QUEUE_TIMEOUT || |
| 50 | gpr_time_cmp(deadline, next_call->timeout) > 0) { |
murgatroid99 | c0ecedb | 2016-05-16 16:14:52 -0700 | [diff] [blame] | 51 | break; |
| 52 | } |
| 53 | } while (!next_call->interrupted); |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 54 | return NULL; |
| 55 | } |
| 56 | |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 57 | /* Helper function to free a completion queue. */ |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 58 | void grpc_rb_completion_queue_destroy(grpc_completion_queue* cq) { |
murgatroid99 | 5756c68 | 2016-06-30 13:58:18 -0700 | [diff] [blame] | 59 | /* Every function that adds an event to a queue also synchronously plucks |
| 60 | that event from the queue, and holds a reference to the Ruby object that |
| 61 | holds the queue, so we only get to this point if all of those functions |
| 62 | have completed, and the queue is empty */ |
murgatroid99 | 9acc40f | 2016-06-30 13:54:09 -0700 | [diff] [blame] | 63 | grpc_completion_queue_shutdown(cq); |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 64 | grpc_completion_queue_destroy(cq); |
| 65 | } |
| 66 | |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 67 | static void unblock_func(void* param) { |
| 68 | next_call_stack* const next_call = (next_call_stack*)param; |
murgatroid99 | c0ecedb | 2016-05-16 16:14:52 -0700 | [diff] [blame] | 69 | next_call->interrupted = 1; |
| 70 | } |
| 71 | |
murgatroid99 | ec1588b | 2016-06-06 15:37:45 -0700 | [diff] [blame] | 72 | /* Does the same thing as grpc_completion_queue_pluck, while properly releasing |
| 73 | the GVL and handling interrupts */ |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 74 | grpc_event rb_completion_queue_pluck(grpc_completion_queue* queue, void* tag, |
| 75 | gpr_timespec deadline, void* reserved) { |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 76 | next_call_stack next_call; |
| 77 | MEMZERO(&next_call, next_call_stack, 1); |
murgatroid99 | ec1588b | 2016-06-06 15:37:45 -0700 | [diff] [blame] | 78 | next_call.cq = queue; |
| 79 | next_call.timeout = deadline; |
| 80 | next_call.tag = tag; |
Craig Tiller | de4e3eb | 2015-05-11 18:51:32 -0700 | [diff] [blame] | 81 | next_call.event.type = GRPC_QUEUE_TIMEOUT; |
murgatroid99 | ec1588b | 2016-06-06 15:37:45 -0700 | [diff] [blame] | 82 | (void)reserved; |
murgatroid99 | c0ecedb | 2016-05-16 16:14:52 -0700 | [diff] [blame] | 83 | /* Loop until we finish a pluck without an interruption. The internal |
| 84 | pluck function runs either until it is interrupted or it gets an |
| 85 | event, or time runs out. |
| 86 | |
| 87 | The basic reason we need this relatively complicated construction is that |
| 88 | we need to re-acquire the GVL when an interrupt comes in, so that the ruby |
murgatroid99 | e68a715 | 2016-05-18 09:46:26 -0700 | [diff] [blame] | 89 | interpreter can do what it needs to do with the interrupt. But we also need |
| 90 | to get back to plucking when the interrupt has been handled. */ |
murgatroid99 | c0ecedb | 2016-05-16 16:14:52 -0700 | [diff] [blame] | 91 | do { |
| 92 | next_call.interrupted = 0; |
| 93 | rb_thread_call_without_gvl(grpc_rb_completion_queue_pluck_no_gil, |
Craig Tiller | baa14a9 | 2017-11-03 09:09:36 -0700 | [diff] [blame] | 94 | (void*)&next_call, unblock_func, |
| 95 | (void*)&next_call); |
murgatroid99 | c0ecedb | 2016-05-16 16:14:52 -0700 | [diff] [blame] | 96 | /* If an interrupt prevented pluck from returning useful information, then |
| 97 | any plucks that did complete must have timed out */ |
Craig Tiller | 5b1c5f2 | 2017-04-19 09:52:18 -0700 | [diff] [blame] | 98 | } while (next_call.interrupted && next_call.event.type == GRPC_QUEUE_TIMEOUT); |
Tim Emiola | 6de558f | 2015-03-28 01:46:00 -0700 | [diff] [blame] | 99 | return next_call.event; |
nnoble | 097ef9b | 2014-12-01 17:06:10 -0800 | [diff] [blame] | 100 | } |