Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * IPVS: Never Queue scheduling module |
| 3 | * |
| 4 | * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $ |
| 5 | * |
| 6 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or |
| 9 | * modify it under the terms of the GNU General Public License |
| 10 | * as published by the Free Software Foundation; either version |
| 11 | * 2 of the License, or (at your option) any later version. |
| 12 | * |
| 13 | * Changes: |
| 14 | * |
| 15 | */ |
| 16 | |
| 17 | /* |
| 18 | * The NQ algorithm adopts a two-speed model. When there is an idle server |
| 19 | * available, the job will be sent to the idle server, instead of waiting |
| 20 | * for a fast one. When there is no idle server available, the job will be |
 * sent to the server that minimizes its expected delay (the Shortest
 * Expected Delay scheduling algorithm).
| 23 | * |
| 24 | * See the following paper for more information: |
| 25 | * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing |
| 26 | * in large heterogeneous systems. In Proceedings IEEE INFOCOM'88, |
| 27 | * pages 986-994, 1988. |
| 28 | * |
| 29 | * Thanks must go to Marko Buuri <marko@buuri.name> for talking NQ to me. |
| 30 | * |
| 31 | * The difference between NQ and SED is that NQ can improve overall |
| 32 | * system utilization. |
| 33 | * |
| 34 | */ |
| 35 | |
| 36 | #include <linux/module.h> |
| 37 | #include <linux/kernel.h> |
| 38 | |
| 39 | #include <net/ip_vs.h> |
| 40 | |
| 41 | |
/* NQ keeps no per-service state, so service init is a no-op. */
static int ip_vs_nq_init_svc(struct ip_vs_service *svc)
{
	return 0;
}
| 47 | |
| 48 | |
/* Nothing was allocated in init_svc, so there is nothing to tear down. */
static int ip_vs_nq_done_svc(struct ip_vs_service *svc)
{
	return 0;
}
| 54 | |
| 55 | |
/* Service updates need no bookkeeping: NQ reads live counters on every
 * scheduling decision, so there is no cached state to refresh. */
static int ip_vs_nq_update_svc(struct ip_vs_service *svc)
{
	return 0;
}
| 61 | |
| 62 | |
| 63 | static inline unsigned int |
| 64 | ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) |
| 65 | { |
| 66 | /* |
| 67 | * We only use the active connection number in the cost |
| 68 | * calculation here. |
| 69 | */ |
| 70 | return atomic_read(&dest->activeconns) + 1; |
| 71 | } |
| 72 | |
| 73 | |
/*
 *	Never Queue scheduling
 */
/*
 * Pick a real server for a new connection using the NQ policy:
 * hand the job to the first idle server found (activeconns == 0);
 * otherwise fall back to Shortest Expected Delay, i.e. minimize
 * (activeconns + 1) / weight across eligible servers.
 *
 * Returns the chosen destination, or NULL when every destination is
 * overloaded or quiesced (weight == 0).
 * NOTE(review): @skb is unused here — selection depends only on
 * per-destination counters, not on packet contents.
 */
static struct ip_vs_dest *
ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_dest *dest, *least = NULL;
	unsigned int loh = 0, doh;	/* overhead of best candidate / current dest */

	IP_VS_DBG(6, "ip_vs_nq_schedule(): Scheduling...\n");

	/*
	 * We calculate the load of each dest server as follows:
	 *	(server expected overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connections.
	 */

	list_for_each_entry(dest, &svc->destinations, n_list) {

		/* Skip overloaded servers and quiesced ones (weight == 0). */
		if (dest->flags & IP_VS_DEST_F_OVERLOAD ||
		    !atomic_read(&dest->weight))
			continue;

		doh = ip_vs_nq_dest_overhead(dest);

		/* return the server directly if it is idle */
		if (atomic_read(&dest->activeconns) == 0) {
			least = dest;
			loh = doh;
			goto out;
		}

		/*
		 * Cross-multiplied ratio comparison (see note above):
		 * take this dest if its overhead/weight ratio beats the
		 * current best, or if no best has been chosen yet.
		 */
		if (!least ||
		    (loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))) {
			least = dest;
			loh = doh;
		}
	}

	/* No eligible destination at all. */
	if (!least)
		return NULL;

  out:
	IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);

	return least;
}
| 134 | |
| 135 | |
| 136 | static struct ip_vs_scheduler ip_vs_nq_scheduler = |
| 137 | { |
| 138 | .name = "nq", |
| 139 | .refcnt = ATOMIC_INIT(0), |
| 140 | .module = THIS_MODULE, |
| 141 | .init_service = ip_vs_nq_init_svc, |
| 142 | .done_service = ip_vs_nq_done_svc, |
| 143 | .update_service = ip_vs_nq_update_svc, |
| 144 | .schedule = ip_vs_nq_schedule, |
| 145 | }; |
| 146 | |
| 147 | |
| 148 | static int __init ip_vs_nq_init(void) |
| 149 | { |
| 150 | INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list); |
| 151 | return register_ip_vs_scheduler(&ip_vs_nq_scheduler); |
| 152 | } |
| 153 | |
/* Module exit point: unhook the NQ scheduler from the IPVS core. */
static void __exit ip_vs_nq_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_nq_scheduler);
}
| 158 | |
/* Wire up module load/unload handlers and declare the license. */
module_init(ip_vs_nq_init);
module_exit(ip_vs_nq_cleanup);
MODULE_LICENSE("GPL");