Merge branch 'master' into fix-time
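
This change converts grpc_millis from gpr_atm to int64_t, so log statements that printed it with %PRIdPTR, %PRIuPTR, or %ld must switch to %PRId64: the PRI*PTR macros match an intptr_t-sized argument, which is only 32 bits on 32-bit targets and would no longer match the 64-bit value. A minimal standalone sketch of the pattern the diff adopts (illustrative only, not gRPC code):

    #include <cinttypes>  // PRId64
    #include <cstdint>
    #include <cstdio>

    typedef int64_t grpc_millis;  // mirrors the new typedef in exec_ctx.h

    int main() {
      grpc_millis deadline = 1500;
      grpc_millis now = 200;
      // PRId64 always matches an int64_t argument; PRIdPTR only does so on
      // 64-bit targets, which is why the old format strings were not portable.
      std::printf("retrying in %" PRId64 " milliseconds\n", deadline - now);
      return 0;
    }
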
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index a10bfea..c51fb97 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -1399,7 +1399,7 @@
}
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
+ "chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand,
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
}
// Schedule retry after computed delay.
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 0b2a308..79deb0a 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -774,7 +774,7 @@
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO,
"[grpclb %p] Received initial LB response message; "
- "client load reporting interval = %" PRIdPTR " milliseconds",
+ "client load reporting interval = %" PRId64 " milliseconds",
grpclb_policy, lb_calld->client_stats_report_interval_);
}
} else if (grpc_lb_glb_trace.enabled()) {
@@ -1412,9 +1412,8 @@
gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...", this);
grpc_millis timeout = next_try - ExecCtx::Get()->Now();
if (timeout > 0) {
- gpr_log(GPR_DEBUG,
- "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.", this,
- timeout);
+ gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active in %" PRId64 "ms.",
+ this, timeout);
} else {
gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
this);
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 6ac9a77..40a5c9a 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -346,7 +346,7 @@
RefCountedPtr<Resolver> self = r->Ref(DEBUG_LOCATION, "retry-timer");
self.release();
if (timeout > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
+ gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
@@ -372,8 +372,8 @@
const grpc_millis last_resolution_ago =
grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_;
gpr_log(GPR_DEBUG,
- "In cooldown from last resolution (from %" PRIdPTR
- " ms ago). Will resolve again in %" PRIdPTR " ms",
+ "In cooldown from last resolution (from %" PRId64
+ " ms ago). Will resolve again in %" PRId64 " ms",
last_resolution_ago, ms_until_next_resolution);
if (!have_next_resolution_timer_) {
have_next_resolution_timer_ = true;
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index fbab136..78ccf79 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -218,7 +218,7 @@
r->Ref(DEBUG_LOCATION, "next_resolution_timer");
self.release();
if (timeout > 0) {
- gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout);
+ gpr_log(GPR_DEBUG, "retrying in %" PRId64 " milliseconds", timeout);
} else {
gpr_log(GPR_DEBUG, "retrying immediately");
}
@@ -245,8 +245,8 @@
const grpc_millis last_resolution_ago =
grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_;
gpr_log(GPR_DEBUG,
- "In cooldown from last resolution (from %" PRIdPTR
- " ms ago). Will resolve again in %" PRIdPTR " ms",
+ "In cooldown from last resolution (from %" PRId64
+ " ms ago). Will resolve again in %" PRId64 " ms",
last_resolution_ago, ms_until_next_resolution);
if (!have_next_resolution_timer_) {
have_next_resolution_timer_ = true;
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index d7815fb..148ebbe 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -467,7 +467,7 @@
if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Subchannel %p: Retry immediately", c);
} else {
- gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRIdPTR " milliseconds", c,
+ gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRId64 " milliseconds", c,
time_til_next);
}
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 44d8cf2..377e286 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -1010,7 +1010,7 @@
#endif
if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG,
- "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
+ "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
" kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 8b80070..76545c4 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -244,10 +244,10 @@
static grpc_error* pollset_work(grpc_pollset* pollset,
grpc_pollset_worker** worker,
grpc_millis deadline) {
- GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") begin", pollset,
+ GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") begin", pollset,
deadline);
grpc_error* err = g_event_engine->pollset_work(pollset, worker, deadline);
- GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") end", pollset,
+ GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") end", pollset,
deadline);
return err;
}
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index 2f544b2..5d5c355 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -53,24 +53,24 @@
static gpr_timespec g_start_time;
-static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
+static grpc_millis timespec_to_millis_round_down(gpr_timespec ts) {
ts = gpr_time_sub(ts, g_start_time);
double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS;
if (x < 0) return 0;
- if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return static_cast<gpr_atm>(x);
+ if (x > GRPC_MILLIS_INF_FUTURE) return GRPC_MILLIS_INF_FUTURE;
+ return static_cast<grpc_millis>(x);
}
-static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) {
+static grpc_millis timespec_to_millis_round_up(gpr_timespec ts) {
ts = gpr_time_sub(ts, g_start_time);
double x = GPR_MS_PER_SEC * static_cast<double>(ts.tv_sec) +
static_cast<double>(ts.tv_nsec) / GPR_NS_PER_MS +
static_cast<double>(GPR_NS_PER_SEC - 1) /
static_cast<double>(GPR_NS_PER_SEC);
if (x < 0) return 0;
- if (x > GPR_ATM_MAX) return GPR_ATM_MAX;
- return static_cast<gpr_atm>(x);
+ if (x > GRPC_MILLIS_INF_FUTURE) return GRPC_MILLIS_INF_FUTURE;
+ return static_cast<grpc_millis>(x);
}
gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
@@ -92,12 +92,12 @@
}
grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) {
- return timespec_to_atm_round_down(
+ return timespec_to_millis_round_down(
gpr_convert_clock_type(ts, g_start_time.clock_type));
}
grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) {
- return timespec_to_atm_round_up(
+ return timespec_to_millis_round_up(
gpr_convert_clock_type(ts, g_start_time.clock_type));
}
@@ -138,7 +138,7 @@
grpc_millis ExecCtx::Now() {
if (!now_is_valid_) {
- now_ = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+ now_ = timespec_to_millis_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
now_is_valid_ = true;
}
return now_;
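
The conversion helpers above keep the same rounding logic; only their names and the saturation bound change (GPR_ATM_MAX becomes GRPC_MILLIS_INF_FUTURE, i.e. INT64_MAX after the typedef change in exec_ctx.h below). A self-contained sketch of the round-down conversion with local stand-ins for the gpr time types (illustrative, not the gRPC implementation):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    typedef int64_t grpc_millis;
    static const grpc_millis kMillisInfFuture = INT64_MAX;  // GRPC_MILLIS_INF_FUTURE

    struct Timespec {  // stand-in for gpr_timespec, already offset from g_start_time
      int64_t tv_sec;
      int32_t tv_nsec;
    };

    // Convert an elapsed (seconds, nanoseconds) value to milliseconds, rounding
    // down and clamping to the "infinite future" sentinel instead of letting the
    // double-to-integer cast overflow.
    static grpc_millis timespec_to_millis_round_down(Timespec ts) {
      double x = 1e3 * static_cast<double>(ts.tv_sec) +
                 static_cast<double>(ts.tv_nsec) / 1e6;
      if (x < 0) return 0;
      if (x > static_cast<double>(kMillisInfFuture)) return kMillisInfFuture;
      return static_cast<grpc_millis>(x);
    }

    int main() {
      Timespec ts = {12, 345678901};  // 12.345678901 seconds
      std::printf("%" PRId64 " ms\n", timespec_to_millis_round_down(ts));  // 12345
      return 0;
    }
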
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 72d0ae5..4cc5586 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -28,10 +28,10 @@
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/iomgr/closure.h"
-typedef gpr_atm grpc_millis;
+typedef int64_t grpc_millis;
-#define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX
-#define GRPC_MILLIS_INF_PAST GPR_ATM_MIN
+#define GRPC_MILLIS_INF_FUTURE INT64_MAX
+#define GRPC_MILLIS_INF_PAST INT64_MIN
/** A workqueue represents a list of work to be executed asynchronously.
Forward declared here to avoid a circular dependency with workqueue.h. */
diff --git a/src/core/lib/iomgr/pollset_custom.cc b/src/core/lib/iomgr/pollset_custom.cc
index 04bd104..783f46c 100644
--- a/src/core/lib/iomgr/pollset_custom.cc
+++ b/src/core/lib/iomgr/pollset_custom.cc
@@ -71,7 +71,7 @@
grpc_millis now = grpc_core::ExecCtx::Get()->Now();
size_t timeout = 0;
if (deadline > now) {
- timeout = deadline - now;
+    timeout = static_cast<size_t>(deadline - now);
}
// We yield here because the poll() call might yield
// control back to the application
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index 5ff10d3..7f53447 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -28,7 +28,7 @@
#include "src/core/lib/iomgr/iomgr.h"
typedef struct grpc_timer {
- gpr_atm deadline;
+ grpc_millis deadline;
uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */
bool pending;
struct grpc_timer* next;
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index 0c6f236..4525984 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -34,6 +34,7 @@
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/time_averaged_stats.h"
#include "src/core/lib/iomgr/timer_heap.h"
@@ -59,9 +60,9 @@
gpr_mu mu;
grpc_time_averaged_stats stats;
/* All and only timers with deadlines <= this will be in the heap. */
- gpr_atm queue_deadline_cap;
+ grpc_millis queue_deadline_cap;
/* The deadline of the next timer due in this shard */
- gpr_atm min_deadline;
+ grpc_millis min_deadline;
/* Index of this timer_shard in the g_shard_queue */
uint32_t shard_queue_index;
/* This holds all timers with deadlines < queue_deadline_cap. Timers in this
@@ -209,15 +210,23 @@
#endif
+#if GPR_ARCH_64
+/* NOTE: TODO(sreek) - Currently, the thread local storage support in grpc is
+   limited to intptr_t, which on 32-bit machines is not wide enough to hold
+   grpc_millis (a 64-bit type). Adding thread local support for 64-bit values
+   is a lot of work for very little gain, so we currently restrict this
+   optimization to 64-bit machines. */
+
/* Thread local variable that stores the deadline of the next timer the thread
* has last-seen. This is an optimization to prevent the thread from checking
* shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
* an expensive operation) */
GPR_TLS_DECL(g_last_seen_min_timer);
+#endif
struct shared_mutables {
/* The deadline of the next timer due across all timer shards */
- gpr_atm min_timer;
+ grpc_millis min_timer;
/* Allow only one run_some_expired_timers at once */
gpr_spinlock checker_mu;
bool initialized;
@@ -227,18 +236,18 @@
static struct shared_mutables g_shared_mutables;
-static gpr_atm saturating_add(gpr_atm a, gpr_atm b) {
- if (a > GPR_ATM_MAX - b) {
- return GPR_ATM_MAX;
+static grpc_millis saturating_add(grpc_millis a, grpc_millis b) {
+ if (a > GRPC_MILLIS_INF_FUTURE - b) {
+ return GRPC_MILLIS_INF_FUTURE;
}
return a + b;
}
-static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
- gpr_atm* next,
+static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
+ grpc_millis* next,
grpc_error* error);
-static gpr_atm compute_min_deadline(timer_shard* shard) {
+static grpc_millis compute_min_deadline(timer_shard* shard) {
return grpc_timer_heap_is_empty(&shard->heap)
? saturating_add(shard->queue_deadline_cap, 1)
: grpc_timer_heap_top(&shard->heap)->deadline;
@@ -257,8 +266,11 @@
g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
gpr_mu_init(&g_shared_mutables.mu);
g_shared_mutables.min_timer = grpc_core::ExecCtx::Get()->Now();
+
+#if GPR_ARCH_64
gpr_tls_init(&g_last_seen_min_timer);
gpr_tls_set(&g_last_seen_min_timer, 0);
+#endif
for (i = 0; i < g_num_shards; i++) {
timer_shard* shard = &g_shards[i];
@@ -287,7 +299,11 @@
grpc_timer_heap_destroy(&shard->heap);
}
gpr_mu_destroy(&g_shared_mutables.mu);
+
+#if GPR_ARCH_64
gpr_tls_destroy(&g_last_seen_min_timer);
+#endif
+
gpr_free(g_shards);
gpr_free(g_shard_queue);
g_shared_mutables.initialized = false;
@@ -346,9 +362,9 @@
#endif
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
- deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb);
+ gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]",
+ timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
+ closure->cb);
}
if (!g_shared_mutables.initialized) {
@@ -383,7 +399,7 @@
}
if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG,
- " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
+ " .. add to shard %d with queue_deadline_cap=%" PRId64
" => is_first_timer=%s",
static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
@@ -404,15 +420,22 @@
if (is_first_timer) {
gpr_mu_lock(&g_shared_mutables.mu);
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
+ gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRId64,
shard->min_deadline);
}
if (deadline < shard->min_deadline) {
- gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
+ grpc_millis old_min_deadline = g_shard_queue[0]->min_deadline;
shard->min_deadline = deadline;
note_deadline_change(shard);
if (shard->shard_queue_index == 0 && deadline < old_min_deadline) {
+#if GPR_ARCH_64
gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline);
+#else
+ // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit
+ // types (like grpc_millis). So all reads and writes to
+        // g_shared_mutables.min_timer variable are done under g_shared_mutables.mu
+ g_shared_mutables.min_timer = deadline;
+#endif
grpc_kick_poller();
}
}
@@ -421,8 +444,10 @@
}
static void timer_consume_kick(void) {
- /* force re-evaluation of last seeen min */
+#if GPR_ARCH_64
+ /* Force re-evaluation of last seen min */
gpr_tls_set(&g_last_seen_min_timer, 0);
+#endif
}
static void timer_cancel(grpc_timer* timer) {
@@ -459,7 +484,7 @@
'queue_deadline_cap') into into shard->heap.
Returns 'true' if shard->heap has atleast ONE element
REQUIRES: shard->mu locked */
-static int refill_heap(timer_shard* shard, gpr_atm now) {
+static int refill_heap(timer_shard* shard, grpc_millis now) {
/* Compute the new queue window width and bound by the limits: */
double computed_deadline_delta =
grpc_time_averaged_stats_update_average(&shard->stats) *
@@ -472,10 +497,10 @@
/* Compute the new cap and put all timers under it into the queue: */
shard->queue_deadline_cap =
saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
- static_cast<gpr_atm>(deadline_delta * 1000.0));
+ static_cast<grpc_millis>(deadline_delta * 1000.0));
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
+ gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
}
for (timer = shard->list.next; timer != &shard->list; timer = next) {
@@ -483,7 +508,7 @@
if (timer->deadline < shard->queue_deadline_cap) {
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
+ gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRId64 " to heap",
timer->deadline);
}
list_remove(timer);
@@ -496,7 +521,7 @@
/* This pops the next non-cancelled timer with deadline <= now from the
queue, or returns NULL if there isn't one.
REQUIRES: shard->mu locked */
-static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
+static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
grpc_timer* timer;
for (;;) {
if (grpc_timer_check_trace.enabled()) {
@@ -511,12 +536,12 @@
timer = grpc_timer_heap_top(&shard->heap);
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
- " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
+ " .. check top timer deadline=%" PRId64 " now=%" PRId64,
timer->deadline, now);
}
if (timer->deadline > now) return nullptr;
if (grpc_timer_trace.enabled()) {
- gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
+ gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRId64 "ms late via %s scheduler",
timer, now - timer->deadline,
timer->closure->scheduler->vtable->name);
}
@@ -527,8 +552,8 @@
}
/* REQUIRES: shard->mu unlocked */
-static size_t pop_timers(timer_shard* shard, gpr_atm now,
- gpr_atm* new_min_deadline, grpc_error* error) {
+static size_t pop_timers(timer_shard* shard, grpc_millis now,
+ grpc_millis* new_min_deadline, grpc_error* error) {
size_t n = 0;
grpc_timer* timer;
gpr_mu_lock(&shard->mu);
@@ -546,13 +571,22 @@
return n;
}
-static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
- gpr_atm* next,
+static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
+ grpc_millis* next,
grpc_error* error) {
grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
- gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
+#if GPR_ARCH_64
+ grpc_millis min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer);
gpr_tls_set(&g_last_seen_min_timer, min_timer);
+#else
+ // On 32-bit systems, gpr_atm_no_barrier_load does not work on 64-bit types
+ // (like grpc_millis). So all reads and writes to g_shared_mutables.min_timer
+ // are done under g_shared_mutables.mu
+ gpr_mu_lock(&g_shared_mutables.mu);
+ grpc_millis min_timer = g_shared_mutables.min_timer;
+ gpr_mu_unlock(&g_shared_mutables.mu);
+#endif
if (now < min_timer) {
if (next != nullptr) *next = GPR_MIN(*next, min_timer);
return GRPC_TIMERS_CHECKED_AND_EMPTY;
@@ -563,14 +597,15 @@
result = GRPC_TIMERS_CHECKED_AND_EMPTY;
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
+ gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRId64,
static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
}
while (g_shard_queue[0]->min_deadline < now ||
- (now != GPR_ATM_MAX && g_shard_queue[0]->min_deadline == now)) {
- gpr_atm new_min_deadline;
+ (now != GRPC_MILLIS_INF_FUTURE &&
+ g_shard_queue[0]->min_deadline == now)) {
+ grpc_millis new_min_deadline;
/* For efficiency, we pop as many available timers as we can from the
shard. This may violate perfect timer deadline ordering, but that
@@ -582,8 +617,8 @@
if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG,
" .. result --> %d"
- ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
- ", now=%" PRIdPTR,
+ ", shard[%d]->min_deadline %" PRId64 " --> %" PRId64
+ ", now=%" PRId64,
result, static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline, new_min_deadline, now);
}
@@ -601,8 +636,15 @@
*next = GPR_MIN(*next, g_shard_queue[0]->min_deadline);
}
+#if GPR_ARCH_64
gpr_atm_no_barrier_store(&g_shared_mutables.min_timer,
g_shard_queue[0]->min_deadline);
+#else
+ // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit
+ // types (like grpc_millis). So all reads and writes to
+ // g_shared_mutables.min_timer are done under g_shared_mutables.mu
+ g_shared_mutables.min_timer = g_shard_queue[0]->min_deadline;
+#endif
gpr_mu_unlock(&g_shared_mutables.mu);
gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
}
@@ -616,17 +658,28 @@
// prelude
grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+#if GPR_ARCH_64
/* fetch from a thread-local first: this avoids contention on a globally
mutable cacheline in the common case */
grpc_millis min_timer = gpr_tls_get(&g_last_seen_min_timer);
+#else
+ // On 32-bit systems, we currently do not have thread local support for 64-bit
+ // types. In this case, directly read from g_shared_mutables.min_timer.
+ // Also, note that on 32-bit systems, gpr_atm_no_barrier_store does not work
+ // on 64-bit types (like grpc_millis). So all reads and writes to
+ // g_shared_mutables.min_timer are done under g_shared_mutables.mu
+ gpr_mu_lock(&g_shared_mutables.mu);
+ grpc_millis min_timer = g_shared_mutables.min_timer;
+ gpr_mu_unlock(&g_shared_mutables.mu);
+#endif
+
if (now < min_timer) {
if (next != nullptr) {
*next = GPR_MIN(*next, min_timer);
}
if (grpc_timer_check_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
- min_timer);
+ gpr_log(GPR_DEBUG, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64,
+ now, min_timer);
}
return GRPC_TIMERS_CHECKED_AND_EMPTY;
}
@@ -642,13 +695,19 @@
if (next == nullptr) {
next_str = gpr_strdup("NULL");
} else {
- gpr_asprintf(&next_str, "%" PRIdPTR, *next);
+ gpr_asprintf(&next_str, "%" PRId64, *next);
}
+#if GPR_ARCH_64
gpr_log(GPR_DEBUG,
- "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
- " glob_min=%" PRIdPTR,
- now, next_str, gpr_tls_get(&g_last_seen_min_timer),
+ "TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
+ " glob_min=%" PRId64,
+ now, next_str, min_timer,
gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
+#else
+ gpr_log(GPR_DEBUG,
+ "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64, now,
+ next_str, min_timer);
+#endif
gpr_free(next_str);
}
// actual code
@@ -660,7 +719,7 @@
if (next == nullptr) {
next_str = gpr_strdup("NULL");
} else {
- gpr_asprintf(&next_str, "%" PRIdPTR, *next);
+ gpr_asprintf(&next_str, "%" PRId64, *next);
}
gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_free(next_str);
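
All of the GPR_ARCH_64 guards in timer_generic.cc protect the same invariant: g_shared_mutables.min_timer is now 64 bits wide, and both the gpr atomics and gpr_tls are intptr_t-sized, so only 64-bit targets can keep the lock-free load/store and the thread-local caching; 32-bit builds fall back to reading and writing the field under g_shared_mutables.mu. A condensed, self-contained sketch of that split, using std::atomic and std::mutex as stand-ins for the gpr primitives (names here are illustrative):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <atomic>
    #include <mutex>

    #if INTPTR_MAX == INT64_MAX  // plays the role of GPR_ARCH_64

    static std::atomic<int64_t> g_min_timer{0};

    static int64_t read_min_timer() {
      // 64-bit target: the value fits in a machine word, so a relaxed atomic
      // load is enough (gRPC additionally caches the last-seen value in TLS).
      return g_min_timer.load(std::memory_order_relaxed);
    }
    static void write_min_timer(int64_t v) {
      g_min_timer.store(v, std::memory_order_relaxed);
    }

    #else

    static std::mutex g_mu;
    static int64_t g_min_timer = 0;

    static int64_t read_min_timer() {
      // 32-bit target: a 64-bit load or store may tear, so every access is made
      // under the mutex, mirroring the #else branches added in the diff.
      std::lock_guard<std::mutex> lock(g_mu);
      return g_min_timer;
    }
    static void write_min_timer(int64_t v) {
      std::lock_guard<std::mutex> lock(g_mu);
      g_min_timer = v;
    }

    #endif

    int main() {
      write_min_timer(12345);
      std::printf("min_timer=%" PRId64 "\n", read_min_timer());
      return 0;
    }
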
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index 94f288a..a14fa8d 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -172,8 +172,7 @@
if (grpc_timer_check_trace.enabled()) {
grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
- gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
- wait_time);
+    gpr_log(GPR_DEBUG, "sleep for %" PRId64 " milliseconds", wait_time);
}
} else { // g_timed_waiter == true && next >= g_timed_waiter_deadline
next = GRPC_MILLIS_INF_FUTURE;
diff --git a/src/core/lib/transport/transport_op_string.cc b/src/core/lib/transport/transport_op_string.cc
index 99af7c1..25ab492 100644
--- a/src/core/lib/transport/transport_op_string.cc
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -52,7 +52,7 @@
}
if (md.deadline != GRPC_MILLIS_INF_FUTURE) {
char* tmp;
- gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline);
+ gpr_asprintf(&tmp, " deadline=%" PRId64, md.deadline);
gpr_strvec_add(b, tmp);
}
}
diff --git a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc
index 01c61a9..4fd7aed 100644
--- a/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc
+++ b/test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc
@@ -128,7 +128,7 @@
break;
}
grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
- gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+ gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left);
GPR_ASSERT(time_left >= 0);
grpc_pollset_worker* worker = nullptr;
gpr_mu_lock(args->mu);
diff --git a/test/core/iomgr/resolve_address_posix_test.cc b/test/core/iomgr/resolve_address_posix_test.cc
index 79b2b50..e495e4c 100644
--- a/test/core/iomgr/resolve_address_posix_test.cc
+++ b/test/core/iomgr/resolve_address_posix_test.cc
@@ -91,7 +91,7 @@
break;
}
grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
- gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+ gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left);
GPR_ASSERT(time_left >= 0);
grpc_pollset_worker* worker = nullptr;
gpr_mu_lock(args->mu);
diff --git a/test/core/iomgr/resolve_address_test.cc b/test/core/iomgr/resolve_address_test.cc
index a0dc484..2fb831a 100644
--- a/test/core/iomgr/resolve_address_test.cc
+++ b/test/core/iomgr/resolve_address_test.cc
@@ -82,7 +82,7 @@
break;
}
grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
- gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
+ gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64, done, time_left);
GPR_ASSERT(time_left >= 0);
grpc_pollset_worker* worker = nullptr;
gpr_mu_lock(args->mu);
diff --git a/test/core/iomgr/timer_heap_test.cc b/test/core/iomgr/timer_heap_test.cc
index ebe5e32..ebe5e66 100644
--- a/test/core/iomgr/timer_heap_test.cc
+++ b/test/core/iomgr/timer_heap_test.cc
@@ -204,7 +204,7 @@
}
if (num_inserted) {
- gpr_atm* min_deadline = nullptr;
+ grpc_millis* min_deadline = nullptr;
for (size_t i = 0; i < elems_size; i++) {
if (elems[i].inserted) {
if (min_deadline == nullptr) {
diff --git a/test/core/transport/timeout_encoding_test.cc b/test/core/transport/timeout_encoding_test.cc
index 2367acc..b7044b5 100644
--- a/test/core/transport/timeout_encoding_test.cc
+++ b/test/core/transport/timeout_encoding_test.cc
@@ -71,7 +71,7 @@
GPR_ASSERT(1 == grpc_http2_decode_timeout(
grpc_slice_from_static_string(buffer), &got));
if (got != expected) {
- gpr_log(GPR_ERROR, "got:'%" PRIdPTR "' != expected:'%" PRIdPTR "'", got,
+ gpr_log(GPR_ERROR, "got:'%" PRId64 "' != expected:'%" PRId64 "'", got,
expected);
abort();
}
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index a39e443..c2f3281 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -362,7 +362,7 @@
grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
- gpr_log(GPR_DEBUG, "Waited %ld milliseconds", waited_ms);
+ gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
// We should have waited at least kInitialBackOffMs. We substract one to
// account for test and precision accuracy drift.
EXPECT_GE(waited_ms, kInitialBackOffMs - 1);
@@ -391,7 +391,7 @@
grpc_timeout_milliseconds_to_deadline(kMinReconnectBackOffMs * 2));
const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
- gpr_log(GPR_DEBUG, "Waited %ld ms", waited_ms);
+ gpr_log(GPR_DEBUG, "Waited %" PRId64 " ms", waited_ms);
// We should have waited at least kMinReconnectBackOffMs. We substract one to
// account for test and precision accuracy drift.
EXPECT_GE(waited_ms, kMinReconnectBackOffMs - 1);