net: use ktime_get_ns() and ktime_get_real_ns() helpers
ktime_get_ns() replaces ktime_to_ns(ktime_get())
ktime_get_real_ns() replaces ktime_to_ns(ktime_get_real())
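As an illustrative sketch of the conversion pattern (the variable names are
examples only, not taken from the files touched below), assuming
<linux/ktime.h> is already included:

    /* before: read a ktime_t, then convert it to nanoseconds */
    u64 mono_ns = ktime_to_ns(ktime_get());       /* CLOCK_MONOTONIC */
    u64 wall_ns = ktime_to_ns(ktime_get_real());  /* CLOCK_REALTIME  */

    /* after: read the clocks directly as u64 nanoseconds */
    u64 mono_ns = ktime_get_ns();
    u64 wall_ns = ktime_get_real_ns();

The helpers simply wrap the previous two-call pattern, so the returned
value is unchanged.
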
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/include/net/codel.h b/include/net/codel.h
index fe0eab3..aeee280 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -66,7 +66,7 @@
static inline codel_time_t codel_get_time(void)
{
- u64 ns = ktime_to_ns(ktime_get());
+ u64 ns = ktime_get_ns();
return ns >> CODEL_SHIFT;
}
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ec030cd..8bbe626 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -50,7 +50,7 @@
static inline psched_time_t psched_get_time(void)
{
- return PSCHED_NS2TICKS(ktime_to_ns(ktime_get()));
+ return PSCHED_NS2TICKS(ktime_get_ns());
}
static inline psched_tdiff_t
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index ba71212..51dd319 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@
* overlaps less than one time per MSL (2 minutes).
* Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
- return seq + (ktime_to_ns(ktime_get_real()) >> 6);
+ return seq + (ktime_get_real_ns() >> 6);
}
#endif
@@ -135,7 +135,7 @@
md5_transform(hash, net_secret);
seq = hash[0] | (((u64)hash[1]) << 32);
- seq += ktime_to_ns(ktime_get_real());
+ seq += ktime_get_real_ns();
seq &= (1ull << 48) - 1;
return seq;
@@ -163,7 +163,7 @@
md5_transform(hash, secret);
seq = hash[0] | (((u64)hash[1]) << 32);
- seq += ktime_to_ns(ktime_get_real());
+ seq += ktime_get_real_ns();
seq &= (1ull << 48) - 1;
return seq;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index de88c4a..0b634e7 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -358,7 +358,7 @@
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
- tstamp->stop = ktime_to_ns(ktime_get_real());
+ tstamp->stop = ktime_get_real_ns();
if (nf_ct_is_dying(ct))
goto delete;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 355a5c4..1bd9ed9 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1737,7 +1737,7 @@
}
tstamp = nf_conn_tstamp_find(ct);
if (tstamp)
- tstamp->start = ktime_to_ns(ktime_get_real());
+ tstamp->start = ktime_get_real_ns();
err = nf_conntrack_hash_check_insert(ct);
if (err < 0)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f641751..cf65a1e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -101,7 +101,7 @@
{
struct ct_iter_state *st = seq->private;
- st->time_now = ktime_to_ns(ktime_get_real());
+ st->time_now = ktime_get_real_ns();
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 0566e46..f32bcb0 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,7 +231,7 @@
if (ret != ACT_P_CREATED)
return ret;
- police->tcfp_t_c = ktime_to_ns(ktime_get());
+ police->tcfp_t_c = ktime_get_ns();
police->tcf_index = parm->index ? parm->index :
tcf_hash_new_index(hinfo);
h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -279,7 +279,7 @@
return police->tcfp_result;
}
- now = ktime_to_ns(ktime_get());
+ now = ktime_get_ns();
toks = min_t(s64, now - police->tcfp_t_c,
police->tcfp_burst);
if (police->peak_present) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index ba32c2b..e12f997 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -416,7 +416,7 @@
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_to_ns(ktime_get());
+ u64 now = ktime_get_ns();
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
@@ -787,7 +787,7 @@
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_to_ns(ktime_get());
+ u64 now = ktime_get_ns();
struct tc_fq_qd_stats st = {
.gc_flows = q->stat_gc_flows,
.highprio_packets = q->stat_internal_packets,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9f949ab..aea942c 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -895,7 +895,7 @@
if (!sch->q.qlen)
goto fin;
- q->now = ktime_to_ns(ktime_get());
+ q->now = ktime_get_ns();
start_at = jiffies;
next_event = q->now + 5LLU * NSEC_PER_SEC;
@@ -1225,7 +1225,7 @@
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
- parent->t_c = ktime_to_ns(ktime_get());
+ parent->t_c = ktime_get_ns();
parent->cmode = HTB_CAN_SEND;
}
@@ -1455,7 +1455,7 @@
cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
- cl->t_c = ktime_to_ns(ktime_get());
+ cl->t_c = ktime_get_ns();
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 18ff634..0c39b75 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -239,7 +239,7 @@
s64 ptoks = 0;
unsigned int len = qdisc_pkt_len(skb);
- now = ktime_to_ns(ktime_get());
+ now = ktime_get_ns();
toks = min_t(s64, now - q->t_c, q->buffer);
if (tbf_peak_present(q)) {
@@ -292,7 +292,7 @@
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
- q->t_c = ktime_to_ns(ktime_get());
+ q->t_c = ktime_get_ns();
q->tokens = q->buffer;
q->ptokens = q->mtu;
qdisc_watchdog_cancel(&q->watchdog);
@@ -431,7 +431,7 @@
if (opt == NULL)
return -EINVAL;
- q->t_c = ktime_to_ns(ktime_get());
+ q->t_c = ktime_get_ns();
qdisc_watchdog_init(&q->watchdog, sch);
q->qdisc = &noop_qdisc;