//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include <climits>
#include <queue>
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       " Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       " Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  ///
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

private:
  void ReleasePred(SUnit *PredSU, bool isChain, unsigned CurCycle);
  void ReleaseSucc(SUnit *SuccSU, bool isChain, unsigned CurCycle);
  void CapturePred(SUnit *PredSU, SUnit *SU, bool isChain);
  void ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
  void UnscheduleNodeBottomUp(SUnit *SU);
  SUnit *BackTrackBottomUp(SUnit*, unsigned, unsigned&, bool&);
  SUnit *CopyAndMoveSuccessors(SUnit *SU);
  bool DelayForLiveRegsBottomUp(SUnit *SU, unsigned &CurCycle);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();
};
}  // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

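  // One entry per physical register: which unit currently defines it and at
  // which cycle that def was scheduled.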
  LiveRegDefs.resize(MRI->getNumRegs(), NULL);
  LiveRegCycles.resize(MRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  CalculateDepths();
  CalculateHeights();

  AvailableQueue->initNodes(SUnitMap, SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  CommuteNodesToReducePressure();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";

  // Emit in scheduled order
  EmitSchedule();
}

/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to a MI.
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size()-1; i != 0; --i) {  // Ignore first node.
    SUnit *SU = Sequence[i];
    if (!SU) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->Node->getTargetOpcode();
      unsigned NumRes = TII->getNumDefs(Opc);
      unsigned NumOps = CountOperands(SU->Node);
      for (unsigned j = 0; j != NumOps; ++j) {
        if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->Node->getOperand(j).Val;
        SUnit *OpSU = SUnitMap[OpN][SU->InstanceNo];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU. Try to commute SU if no other source operands
          // are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->Node->getOperand(k).Val;
              OpSU = SUnitMap[OpN][SU->InstanceNo];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->Node);
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  if (!isChain)
    --PredSU->NumSuccsLeft;
  else
    --PredSU->NumChainSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
    // EntryToken has to go last! Special case it here.
    if (PredSU->Node->getOpcode() != ISD::EntryToken) {
      PredSU->isAvailable = true;
      AvailableQueue->push(PredSU);
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  PredSU->CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    PredSU->CycleBound = std::max(PredSU->CycleBound,
                                  I->Dep->Cycle + PredSU->Latency);
  }

  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  if (!isChain)
    ++PredSU->NumSuccsLeft;
  else
    ++PredSU->NumChainSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule. Update its own
/// state and that of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

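  // Any physical register SU defines becomes live again: SU is once more the
  // pending def, and its live cycle is rolled back to the earliest scheduled
  // successor that reads it.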
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}

/// BackTrackBottomUp - Back track scheduling to a previous cycle specified in
/// BTCycle in order to schedule a specific node. Returns the last unscheduled
/// SUnit. Also reports, via SuccUnsched, whether a successor of the node was
/// unscheduled in the process.
SUnit *ScheduleDAGRRList::BackTrackBottomUp(SUnit *SU, unsigned BTCycle,
                                            unsigned &CurCycle, bool &SuccUnsched) {
  SuccUnsched = false;
  SUnit *OldSU = NULL;
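  // Pop nodes off the tail of the current schedule and unschedule them until
  // we are back at BTCycle, noting whether any of them is a successor of SU.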
  while (CurCycle > BTCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      SuccUnsched = true;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }


  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  return OldSU;
}

/// isSafeToCopy - True if the SUnit for the given SDNode can safely be cloned,
/// i.e. the node does not produce a flag, it does not read a flag and it does
/// not have an incoming chain.
static bool isSafeToCopy(SDNode *N) {
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Flag)
      return false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDOperand &Op = N->getOperand(i);
    MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
    if (VT == MVT::Other || VT == MVT::Flag)
      return false;
  }

  return true;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SUnit *NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      NewSU->addPred(I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<SDep*, 2> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
    if (I->Dep->isScheduled) {
      I->Dep->addPred(NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(I);
    }
  }
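  // The scheduled successors now depend on NewSU; sever their old edges to SU.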
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i]->Dep;
    bool isCtrl = DelDeps[i]->isCtrl;
    Succ->removePred(SU, isCtrl, false);
  }

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  return NewSU;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, unsigned &CurCycle){
  if (LiveRegs.empty())
    return false;

  // If this node would clobber any "live" register, then it's not ready.
  // However, if this is the last "available" node, then we may have to
  // backtrack.
  bool MustSched = AvailableQueue->empty();
  SmallVector<unsigned, 4> LRegs;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      unsigned Reg = I->Reg;
      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep)
        LRegs.push_back(Reg);
      for (const unsigned *Alias = MRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep)
          LRegs.push_back(*Alias);
    }
  }

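  // Also check the implicit physical register defs of this node (and of any
  // nodes flagged to it) against the live set.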
  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
    if (!Node->isTargetOpcode())
      continue;
    const TargetInstrDescriptor &TID = TII->get(Node->getTargetOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU)
        LRegs.push_back(*Reg);
      for (const unsigned *Alias = MRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU)
          LRegs.push_back(*Alias);
    }
  }

  if (MustSched && !LRegs.empty()) {
    // We have made a mistake by scheduling some nodes too early. Now we must
    // schedule the current node which will end up clobbering some live
    // registers that are expensive / impossible to copy. Try unscheduling
    // up to the point where it's safe to schedule the current node.
    unsigned LiveCycle = CurCycle;
    for (unsigned i = 0, e = LRegs.size(); i != e; ++i) {
      unsigned Reg = LRegs[i];
      unsigned LCycle = LiveRegCycles[Reg];
      LiveCycle = std::min(LiveCycle, LCycle);
    }

    if (SU->CycleBound < LiveCycle) {
      bool SuccUnsched = false;
      SUnit *OldSU = BackTrackBottomUp(SU, LiveCycle, CurCycle, SuccUnsched);
      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (OldSU->isAvailable) {
        OldSU->isAvailable = false;
        AvailableQueue->remove(OldSU);
      }
      SU->addPred(OldSU, true, true);
      // If a successor has been unscheduled, then it's not possible to
      // schedule the current node.
      return SuccUnsched;
    } else {
      // Try duplicating the nodes that produce these "expensive to copy"
      // values to break the dependency.
      for (unsigned i = 0, e = LRegs.size(); i != e; ++i) {
        unsigned Reg = LRegs[i];
        SUnit *LRDef = LiveRegDefs[Reg];
        if (isSafeToCopy(LRDef->Node)) {
          SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
          LiveRegDefs[Reg] = NewDef;
          NewDef->addPred(SU, true, true);
          SU->isAvailable = false;
          AvailableQueue->push(NewDef);
        } else {
          assert(false && "Expensive copying is required?");
          abort();
        }
      }
      return true;
    }
  }
  return !LRegs.empty();
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
  RootSU->isAvailable = true;
  AvailableQueue->push(RootSU);

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();
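    // Keep popping until we find a node that is within its cycle bound and is
    // not blocked by a live physical register. Skipped nodes are stashed in
    // NotReady and re-pushed below.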
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle)
        if (!DelayForLiveRegsBottomUp(CurSU, CurCycle))
          break;

      // Verify node is still ready. It may not be in case the
      // scheduler has backtracked.
      if (CurSU->isAvailable) {
        CurSU->isPending = true;
        NotReady.push_back(CurSU);
      }
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Add entry node last
  if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
    SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
    Sequence.push_back(Entry);
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());


#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);

  if (!isChain)
    --SuccSU->NumPredsLeft;
  else
    --SuccSU->NumChainPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    SuccSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}


/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Top down: release successors
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
  SU->isScheduled = true;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;
  SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // Emit the entry node first.
  ScheduleNodeTopDown(Entry, CurCycle);
  Sequence.push_back(Entry);
  ++CurCycle;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU && CurSU->CycleBound > CurCycle) {
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    AvailableQueue->push_all(NotReady);
    NotReady.clear();

    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeTopDown(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    CurCycle++;
  }


#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}



//===----------------------------------------------------------------------===//
//  RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace

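/// isCopyFromLiveIn - Return true if SU is a CopyFromReg node whose last
/// operand is not a flag, i.e. it copies a register that is live into the
/// function rather than one produced by a flagged node.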
static inline bool isCopyFromLiveIn(const SUnit *SU) {
  SDNode *N = SU->Node;
  return N->getOpcode() == ISD::CopyFromReg &&
    N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
}

namespace {
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
    : public SchedulingPriorityQueue {
    std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;

  public:
    RegReductionPriorityQueue() :
      Queue(SF(this)) {}

    virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                           std::vector<SUnit> &sunits) {}

    virtual void addNode(const SUnit *SU) {}

    virtual void updateNode(const SUnit *SU) {}

    virtual void releaseState() {}

    virtual unsigned getNodePriority(const SUnit *SU) const {
      return 0;
    }

    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push(U);
    }
    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        Queue.push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      return V;
    }

    /// remove - This is a really inefficient way to remove a node from a
    /// priority queue. We should roll our own heap to make this better or
    /// something.
    void remove(SUnit *SU) {
      std::vector<SUnit*> Temp;

      assert(!Queue.empty() && "Not in queue!");
      while (Queue.top() != SU) {
        Temp.push_back(Queue.top());
        Queue.pop();
        assert(!Queue.empty() && "Not in queue!");
      }

      // Remove the node from the PQ.
      Queue.pop();

      // Add all the other nodes back.
      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
        Queue.push(Temp[i]);
    }
  };

  template<class SF>
  class VISIBILITY_HIDDEN BURegReductionPriorityQueue
    : public RegReductionPriorityQueue<SF> {
    // SUnitMap - SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

    const TargetInstrInfo *TII;
  public:
    explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii)
      : TII(tii) {}

    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->Node->getOpcode();
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices. But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      else if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that would
        // be consumed (e.g. store), then it terminates a chain of computation.
        // Give it a large SethiUllman number so it will be scheduled right
        // before its predecessors, so that it doesn't lengthen their live
        // ranges.
        return 0xffff;
      else if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      else
        return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    bool canClobber(SUnit *SU, SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };


  template<class SF>
  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
    : public RegReductionPriorityQueue<SF> {
    // SUnitMap - SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    TDRegReductionPriorityQueue() {}

    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };
}

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxCycle = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned Cycle = I->Dep->Cycle;
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->Dep->Node->getOpcode() == ISD::CopyToReg)
      Cycle = closestSucc(I->Dep)+1;
    if (Cycle > MaxCycle)
      MaxCycle = Cycle;
  }
  return MaxCycle;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers. Live-in operands and live-out results don't count
/// since they are "fixed".
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
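  // Each non-chain operand that is not produced by a CopyFromReg may need a
  // scratch register; non-chain results not consumed by a CopyToReg are
  // weighted more heavily.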
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    if (I->Dep->Node->getOpcode() != ISD::CopyFromReg)
      Scratches++;
  }
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain succs
    if (I->Dep->Node->getOpcode() != ISD::CopyToReg)
      Scratches += 10;
  }
  return Scratches;
}

// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  // There used to be a special tie breaker here that looked for
  // two-address instructions and preferred the instruction with a
  // def&use operand. The special case triggered diagnostics when
  // _GLIBCXX_DEBUG was enabled because it broke the strict weak
  // ordering that priority_queue requires. It didn't help much anyway
  // because AddPseudoTwoAddrDeps already covers many of the cases
  // where it would have applied. In addition, it's counter-intuitive
  // that a tie breaker would be the first thing attempted. There's a
  // "real" tie breaker below that is the operation of last resort.
  // The fact that the "special tie breaker" would trigger when there
  // wasn't otherwise a tie is what broke the strict weak ordering
  // constraint.

  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority > RPriority)
    return true;
  else if (LPriority == RPriority) {
    // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
    // e.g.
    // t1 = op t2, c1
    // t3 = op t4, c2
    //
    // and the following instructions are both ready.
    // t2 = op c3
    // t4 = op c4
    //
    // Then schedule t2 = op first.
    // i.e.
    // t4 = op c4
    // t2 = op c3
    // t1 = op t2, c1
    // t3 = op t4, c2
    //
    // This creates more short live intervals.
    unsigned LDist = closestSucc(left);
    unsigned RDist = closestSucc(right);
    if (LDist < RDist)
      return true;
    else if (LDist == RDist) {
      // Intuitively, it's good to push down instructions whose results are
      // liveout so their long live ranges won't conflict with other values
      // which are needed inside the BB. Further prioritize liveout instructions
      // by the number of operands which are calculated within the BB.
      unsigned LScratch = calcMaxScratches(left);
      unsigned RScratch = calcMaxScratches(right);
      if (LScratch > RScratch)
        return true;
      else if (LScratch == RScratch)
        if (left->Height > right->Height)
          return true;
        else if (left->Height == right->Height)
          if (left->Depth < right->Depth)
            return true;
          else if (left->Depth == right->Depth)
            if (left->CycleBound > right->CycleBound)
              return true;
    }
  }
  return false;
}

// FIXME: This is probably too slow!
static void isReachable(SUnit *SU, SUnit *TargetSU,
                        SmallPtrSet<SUnit*, 32> &Visited, bool &Reached) {
  if (Reached) return;
  if (SU == TargetSU) {
    Reached = true;
    return;
  }
  if (!Visited.insert(SU)) return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E;
       ++I)
    isReachable(I->Dep, TargetSU, Visited, Reached);
}

static bool isReachable(SUnit *SU, SUnit *TargetSU) {
  SmallPtrSet<SUnit*, 32> Visited;
  bool Reached = false;
  isReachable(SU, TargetSU, Visited, Reached);
  return Reached;
}

template<class SF>
bool BURegReductionPriorityQueue<SF>::canClobber(SUnit *SU, SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->Node->getTargetOpcode();
    unsigned NumRes = TII->getNumDefs(Opc);
    unsigned NumOps = ScheduleDAG::CountOperands(SU->Node);
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TII->getOperandConstraint(Opc, i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->Node->getOperand(i).Val;
        if (Op == (*SUnitMap)[DU][SU->InstanceNo])
          return true;
      }
    }
  }
  return false;
}


/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule).
template<class SF>
void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = (SUnit *)&((*SUnits)[i]);
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->Node;
    if (!Node->isTargetOpcode())
      continue;

    unsigned Opc = Node->getTargetOpcode();
    unsigned NumRes = TII->getNumDefs(Opc);
    unsigned NumOps = ScheduleDAG::CountOperands(Node);
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->Node->getOperand(j).Val;
        SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
        if (!DUSU) continue;
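        // Look at every other (non-chain) use of the tied operand. If that use
        // does not itself tie the operand (or SU is not commutable while the
        // use is), and the new edge would not create a cycle, constrain it
        // with an artificial dependency.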
        for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
             I != E; ++I) {
          if (I->isCtrl) continue;
          SUnit *SuccSU = I->Dep;
          // Don't constrain nodes with implicit defs. It can create cycles
          // plus it may increase register pressure.
          if (SuccSU == SU || SuccSU->hasImplicitDefs)
            continue;
          // Be conservative. Ignore if nodes aren't at the same depth.
          if (SuccSU->Depth != SU->Depth)
            continue;
          if ((!canClobber(SuccSU, DUSU) ||
               (!SU->isCommutable && SuccSU->isCommutable)) &&
              !isReachable(SuccSU, SU)) {
            DOUT << "Adding an edge from SU # " << SU->NodeNum
                 << " to SU #" << SuccSU->NodeNum << "\n";
            SU->addPred(SuccSU, true, true);
          }
        }
      }
    }
  }
}

/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
/// Smaller number is the higher priority.
template<class SF>
unsigned BURegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

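  // Sethi-Ullman style recurrence: take the maximum number over the
  // (non-chain) predecessors, then add one for each additional predecessor
  // that ties that maximum.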
  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    SUnit *PredSU = I->Dep;
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
}

static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    SUnit *SuccSU = I->Dep;
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->Dep;
      if (!PredSU->isScheduled)
        ++Sum;
    }
  }

  return Sum;
}


// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->Node->isTargetOpcode();
  bool RIsTarget = right->Node->isTargetOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
  unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;
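  // A node whose successors together have exactly one unscheduled predecessor
  // gets a small bonus, as does a node with a single successor; "floater"
  // target nodes with no predecessors are penalized below.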

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  // Special tie breaker: if two nodes share an operand, the one that uses it
  // as a def&use operand is preferred.
  if (LIsTarget && RIsTarget) {
    if (left->isTwoAddress && !right->isTwoAddress) {
      SDNode *DUNode = left->Node->getOperand(0).Val;
      if (DUNode->isOperand(right->Node))
        RBonus += 2;
    }
    if (!left->isTwoAddress && right->isTwoAddress) {
      SDNode *DUNode = right->Node->getOperand(0).Val;
      if (DUNode->isOperand(left->Node))
        LBonus += 2;
    }
  }
  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus < RPriority+RBonus)
    return true;
  else if (LPriority == RPriority)
    if (left->Depth < right->Depth)
      return true;
    else if (left->Depth == right->Depth)
      if (left->NumSuccsLeft > right->NumSuccsLeft)
        return true;
      else if (left->NumSuccsLeft == right->NumSuccsLeft)
        if (left->CycleBound > right->CycleBound)
          return true;
  return false;
}

/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
/// Smaller number is the higher priority.
template<class SF>
unsigned TDRegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Opc = SU->Node->getOpcode();
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    SethiUllmanNumber = 0xffff;
  else if (SU->NumSuccsLeft == 0)
    // If SU does not have a use, i.e. it doesn't produce a value that would
    // be consumed (e.g. store), then it terminates a chain of computation.
    // Give it a small SethiUllman number so it will be scheduled right before
    // its predecessors, so that it doesn't lengthen their live ranges.
    SethiUllmanNumber = 0;
  else if (SU->NumPredsLeft == 0 &&
           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
    SethiUllmanNumber = 0xffff;
  else {
    int Extra = 0;
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl) continue;  // ignore chain preds
      SUnit *PredSU = I->Dep;
      unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
      if (PredSethiUllman > SethiUllmanNumber) {
        SethiUllmanNumber = PredSethiUllman;
        Extra = 0;
      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
        ++Extra;
    }

    SethiUllmanNumber += Extra;
  }

  return SethiUllmanNumber;
}
1244
Evan Cheng6730f032007-01-08 23:55:53 +00001245/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1246/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001247template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001248void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001249 SethiUllmanNumbers.assign(SUnits->size(), 0);
1250
1251 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001252 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001253}
1254
1255//===----------------------------------------------------------------------===//
1256// Public Constructor Functions
1257//===----------------------------------------------------------------------===//
1258
Jim Laskey03593f72006-08-01 18:29:48 +00001259llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1260 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001261 MachineBasicBlock *BB) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001262 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
Jim Laskey95eda5b2006-08-01 14:21:23 +00001263 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001264 new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII));
Evan Chengd38c22b2006-05-11 23:55:42 +00001265}
1266
Jim Laskey03593f72006-08-01 18:29:48 +00001267llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1268 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001269 MachineBasicBlock *BB) {
Jim Laskey95eda5b2006-08-01 14:21:23 +00001270 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
Chris Lattner296a83c2007-02-01 04:55:59 +00001271 new TDRegReductionPriorityQueue<td_ls_rr_sort>());
Evan Chengd38c22b2006-05-11 23:55:42 +00001272}
1273