/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *     Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <asm/uv/uv_hub.h>
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)

#define XPC_ACTIVATE_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
				 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_NOTIFY_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
				 XPC_NOTIFY_MSG_SIZE_UV)

static void *xpc_activate_mq_uv;
static void *xpc_notify_mq_uv;

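/*
 * Initialize the uv-specific per-partition state: each partition's
 * flags_lock and its (initially inactive) view of the remote partition's
 * activation state.
 */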
static int
xpc_setup_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

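/*
 * Allocate a GRU message queue on the node of the given cpu, register it
 * with the GRU, and hook up the irq handler that will drain it.  Returns
 * the queue's base address, or NULL on failure.  Note that mq_size is
 * rounded up to a power-of-two number of pages by get_order().
 */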
static void *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
		     irq_handler_t irq_handler)
{
	int ret;
	int nid;
	int mq_order;
	struct page *page;
	void *mq;

	nid = cpu_to_node(cpuid);
	mq_order = get_order(mq_size);
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				mq_order);
	if (page == NULL)
		return NULL;

	mq = page_address(page);
	ret = gru_create_message_queue(mq, mq_size);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		free_pages((unsigned long)mq, mq_order);
		return NULL;
	}

	/* !!! Need to do some other things to set up IRQ */

	ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			irq, ret);
		free_pages((unsigned long)mq, mq_order);
		return NULL;
	}

	/* !!! enable generation of irq when GRU mq op occurs to this mq */

	/* ??? allow other partitions to access GRU mq? */

	return mq;
}

static void
xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
{
	/* ??? disallow other partitions to access GRU mq? */

	/* !!! disable generation of irq when GRU mq op occurs to this mq */

	free_irq(irq, NULL);

	free_pages((unsigned long)mq, get_order(mq_size));
}

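/*
 * Send a message to the GRU message queue whose global physical address is
 * mq_gpa.  Transient failures are retried: a full queue (MQE_QUEUE_FULL)
 * after a short sleep, congestion (MQE_CONGESTION) immediately.  Any other
 * error is treated as fatal and mapped to xpGruSendMqError.
 */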
static enum xp_retval
xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(mq_gpa, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

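/*
 * Called by the heartbeat checker thread to act on the activate-state
 * requests (activate/reactivate/deactivate) that the activate IRQ handler
 * recorded in each partition's act_state_req under
 * xpc_activate_IRQ_rcvd_lock.
 */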
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

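/*
 * The activate message queue's irq handler.  Drains all pending messages,
 * dispatching on each message's type: heartbeat updates are noted directly,
 * activate/deactivate requests are queued for the heartbeat checker, and
 * channel control (chctl) messages set the appropriate chctl flag and wake
 * the channel manager.  A changed rp_ts_jiffies indicates the remote
 * partition rebooted, which triggers a reactivate request.
 */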
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	unsigned long irq_flags;
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_uv *part_uv;
	struct xpc_openclose_args *args;
	int wakeup_hb_checker = 0;

	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"invalid partid=0x%x passed in message\n",
				partid);
			gru_free_message(xpc_activate_mq_uv, msg_hdr);
			continue;
		}
		part = &xpc_partitions[partid];
		part_uv = &part->sn.uv;

		part_uv->remote_act_state = msg_hdr->act_state;

		switch (msg_hdr->type) {
		case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
			/* syncing of remote_act_state was just done above */
			break;

		case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
			    msg_hdr;
			part_uv->heartbeat = msg->heartbeat;
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
			    msg_hdr;
			part_uv->heartbeat = msg->heartbeat;
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
			    msg_hdr;
			part_uv->heartbeat = msg->heartbeat;
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
			struct xpc_activate_mq_msg_activate_req_uv *msg;

			/*
			 * ??? Do we deal here with ts_jiffies being different
			 * ??? if act_state != XPC_P_AS_INACTIVE instead of
			 * ??? below?
			 */
			msg = (struct xpc_activate_mq_msg_activate_req_uv *)
			    msg_hdr;
			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
					  irq_flags);
			if (part_uv->act_state_req == 0)
				xpc_activate_IRQ_rcvd++;
			part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
			part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
			part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
			part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
					       irq_flags);
			wakeup_hb_checker++;
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
			struct xpc_activate_mq_msg_deactivate_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_deactivate_req_uv *)
			    msg_hdr;
			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
					  irq_flags);
			if (part_uv->act_state_req == 0)
				xpc_activate_IRQ_rcvd++;
			part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
			part_uv->reason = msg->reason;
			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
					       irq_flags);
			wakeup_hb_checker++;
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
			struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_closerequest_uv
			       *)msg_hdr;
			args = &part->remote_openclose_args[msg->ch_number];
			args->reason = msg->reason;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_CLOSEREQUEST;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
			struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_closereply_uv *)
			    msg_hdr;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_CLOSEREPLY;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
			struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_openrequest_uv
			       *)msg_hdr;
			args = &part->remote_openclose_args[msg->ch_number];
			args->msg_size = msg->msg_size;
			args->local_nentries = msg->local_nentries;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
			struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_openreply_uv *)
			    msg_hdr;
			args = &part->remote_openclose_args[msg->ch_number];
			args->remote_nentries = msg->remote_nentries;
			args->local_nentries = msg->local_nentries;
			args->local_msgqueue_pa = msg->local_notify_mq_gpa;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_OPENREPLY;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags |= XPC_P_ENGAGED_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;

		case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_ENGAGED_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;

		default:
			dev_err(xpc_part, "received unknown activate_mq msg "
				"type=%d from partition=%d\n", msg_hdr->type,
				partid);
		}

		if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
		    part->remote_rp_ts_jiffies != 0) {
			/*
			 * ??? Does what we do here need to be sensitive to
			 * ??? act_state or remote_act_state?
			 */
			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
					  irq_flags);
			if (part_uv->act_state_req == 0)
				xpc_activate_IRQ_rcvd++;
			part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
					       irq_flags);
			wakeup_hb_checker++;
		}

		gru_free_message(xpc_activate_mq_uv, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

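/*
 * Fill in the common message header and send the message to the remote
 * partition's activate message queue, which raises its activate IRQ.
 */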
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = XPC_PARTID(part);
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg,
				msg_size);
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

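/*
 * Locally simulate receipt of an activate message: bump the received count
 * (once per partition), record the request, and wake the heartbeat checker
 * so that xpc_process_activate_IRQ_rcvd_uv() processes it.
 */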
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	/* !!! call the UV version of sn_partition_reserved_page_pa() */
	return xpUnsupported;
}

static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
	return 0;
}

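/*
 * Broadcast a heartbeat message, carrying the next value of our heartbeat
 * counter, to every partition we are currently heartbeating to.
 */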
static void
xpc_send_heartbeat_uv(int msg_type)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_activate_mq_msg_heartbeat_req_uv msg;

	/*
	 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
	 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
	 * !!! seconds. This is an increase in numalink traffic.
	 * ??? Is this good?
	 */

	msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);

	partid = find_first_bit(xpc_heartbeating_to_mask_uv,
				XP_MAX_NPARTITIONS_UV);

	while (partid < XP_MAX_NPARTITIONS_UV) {
		part = &xpc_partitions[partid];

		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      msg_type);

		partid = find_next_bit(xpc_heartbeating_to_mask_uv,
				       XP_MAX_NPARTITIONS_UV, partid + 1);
	}
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
}

static void
xpc_heartbeat_init_uv(void)
{
	atomic64_set(&xpc_heartbeat_uv, 0);
	bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
	xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

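/*
 * Check whether the given partition's heartbeat has advanced since we last
 * looked.  An offline-flagged heartbeat also counts as alive, since the
 * remote partition deliberately stopped heartbeating.  Returns xpSuccess
 * if the partition appears alive, xpNoHeartbeat otherwise.
 */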
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret = xpNoHeartbeat;

	if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
	    part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {

		if (part_uv->heartbeat != part->last_heartbeat ||
		    (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {

			part->last_heartbeat = part_uv->heartbeat;
			ret = xpSuccess;
		}
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

/*
 * Set up the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* !!! this function needs fleshing out */
	return xpUnsupported;
}

/*
 * Tear down the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* !!! this function needs fleshing out */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state, which at this point should be
	 * XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a quarter of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

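/*
 * Atomically fetch and clear all of the partition's channel control flags,
 * which the activate IRQ handler sets as chctl messages arrive.
 */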
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	/* !!! this function needs fleshing out */
	return xpUnsupported;
}

static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	ch_uv->remote_notify_mq_gpa = 0;

	/* !!! this function needs fleshing out */
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.msg_size = ch->msg_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long msgqueue_pa)
{
	ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa;
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static struct xpc_msg *
xpc_get_deliverable_msg_uv(struct xpc_channel *ch)
{
	/* !!! this function needs fleshing out */
	return NULL;
}

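/*
 * Install the uv implementations of the arch-neutral XPC function pointers
 * and create the activate message queue through which partitions converse.
 */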
int
xpc_init_uv(void)
{
	xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
	xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
	xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
	xpc_online_heartbeat = xpc_online_heartbeat_uv;
	xpc_heartbeat_init = xpc_heartbeat_init_uv;
	xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
	xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;

	xpc_request_partition_activation = xpc_request_partition_activation_uv;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_uv;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_uv;

	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;

	xpc_make_first_contact = xpc_make_first_contact_uv;

	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;

	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;

	xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
	xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_uv;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
	xpc_partition_engaged = xpc_partition_engaged_uv;
	xpc_any_partition_engaged = xpc_any_partition_engaged_uv;

	xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv;

	/* ??? The cpuid argument's value is 0, is that what we want? */
	/* !!! The irq argument's value isn't correct. */
	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
						  xpc_handle_activate_IRQ_uv);
	if (xpc_activate_mq_uv == NULL)
		return -ENOMEM;

	return 0;
}

void
xpc_exit_uv(void)
{
	/* !!! The irq argument's value isn't correct. */
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
}