1//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interfaces that Sparc uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SparcISelLowering.h"
17#include "SparcRegisterInfo.h"
19#include "SparcTargetMachine.h"
35#include "llvm/IR/Function.h"
36#include "llvm/IR/IRBuilder.h"
37#include "llvm/IR/Module.h"
40using namespace llvm;
41
42
43//===----------------------------------------------------------------------===//
44// Calling Convention Implementation
45//===----------------------------------------------------------------------===//
46
47static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
48 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
49 ISD::ArgFlagsTy &ArgFlags, CCState &State)
50{
51 assert (ArgFlags.isSRet());
52
53 // Assign SRet argument.
54 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
55 0,
56 LocVT, LocInfo));
57 return true;
58}
59
60static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
61 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
62 ISD::ArgFlagsTy &ArgFlags, CCState &State)
63{
64 static const MCPhysReg RegList[] = {
65 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
66 };
67 // Try to get first reg.
68 if (Register Reg = State.AllocateReg(RegList)) {
69 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
70 } else {
71 // Assign whole thing in stack.
72 State.addLoc(CCValAssign::getCustomMem(
73 ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
74 return true;
75 }
76
77 // Try to get second reg.
78 if (Register Reg = State.AllocateReg(RegList))
79 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
80 else
81 State.addLoc(CCValAssign::getCustomMem(
82 ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
83 return true;
84}
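// Worked example (illustrative; not part of the upstream source). Under the
// 32-bit ABI an i64 is passed as two i32 halves:
//
//   define void @f(i64 %x)      ; %x = 0xAABBCCDD00112233
//
// assigns the high word 0xAABBCCDD to %i0 and the low word 0x00112233 to %i1
// (callee view). If only %i5 is left, the high half takes %i5 and the low
// half goes to the 4-byte stack slot allocated by AllocateStack above.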
85
86static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
87 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
88 ISD::ArgFlagsTy &ArgFlags, CCState &State)
89{
90 static const MCPhysReg RegList[] = {
91 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
92 };
93
94 // Try to get first reg.
95 if (Register Reg = State.AllocateReg(RegList))
96 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
97 else
98 return false;
99
100 // Try to get second reg.
101 if (Register Reg = State.AllocateReg(RegList))
102 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
103 else
104 return false;
105
106 return true;
107}
108
109// Allocate a full-sized argument for the 64-bit ABI.
110static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
111 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
112 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
113 assert((LocVT == MVT::f32 || LocVT == MVT::f128
114 || LocVT.getSizeInBits() == 64) &&
115 "Can't handle non-64 bits locations");
116
117 // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
118 unsigned size = (LocVT == MVT::f128) ? 16 : 8;
119 Align alignment =
120 (LocVT == MVT::f128 || ArgFlags.isSplit()) ? Align(16) : Align(8);
121 unsigned Offset = State.AllocateStack(size, alignment);
122 unsigned Reg = 0;
123
124 if (LocVT == MVT::i64 && Offset < 6*8)
125 // Promote integers to %i0-%i5.
126 Reg = SP::I0 + Offset/8;
127 else if (LocVT == MVT::f64 && Offset < 16*8)
128 // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
129 Reg = SP::D0 + Offset/8;
130 else if (LocVT == MVT::f32 && Offset < 16*8)
131 // Promote floats to %f1, %f3, ...
132 Reg = SP::F1 + Offset/4;
133 else if (LocVT == MVT::f128 && Offset < 16*8)
134 // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
135 Reg = SP::Q0 + Offset/16;
136
137 // Promote to register when possible, otherwise use the stack slot.
138 if (Reg) {
139 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
140 return true;
141 }
142
143 // Bail out if this is a return CC and we run out of registers to place
144 // values into.
145 if (IsReturn)
146 return false;
147
148 // This argument goes on the stack in an 8-byte slot.
149 // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
150 // the right-aligned float. The first 4 bytes of the stack slot are undefined.
151 if (LocVT == MVT::f32)
152 Offset += 4;
153
154 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
155 return true;
156}
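// Worked example (illustrative): the stack offset returned by AllocateStack
// decides the register. For f(i64 %a, i64 %b, i64 %c) the slots sit at
// offsets 0, 8 and 16, so %c is promoted to %i2 (SP::I0 + 16/8). A lone f32
// at offset 0 maps to %f1 (SP::F1 + 0/4); if it instead ends up in memory,
// it is right-aligned in its 8-byte slot (the Offset += 4 above).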
157
158// Allocate a half-sized argument for the 64-bit ABI.
159//
160// This is used when passing { float, int } structs by value in registers.
161static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
162 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
163 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
164 assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
165 unsigned Offset = State.AllocateStack(4, Align(4));
166
167 if (LocVT == MVT::f32 && Offset < 16*8) {
168 // Promote floats to %f0-%f31.
169 State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
170 LocVT, LocInfo));
171 return true;
172 }
173
174 if (LocVT == MVT::i32 && Offset < 6*8) {
175 // Promote integers to %i0-%i5, using half the register.
176 unsigned Reg = SP::I0 + Offset/8;
177 LocVT = MVT::i64;
178 LocInfo = CCValAssign::AExt;
179
180 // Set the Custom bit if this i32 goes in the high bits of a register.
181 if (Offset % 8 == 0)
182 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
183 LocVT, LocInfo));
184 else
185 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
186 return true;
187 }
188
189 // Bail out if this is a return CC and we run out of registers to place
190 // values into.
191 if (IsReturn)
192 return false;
193
194 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
195 return true;
196}
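// Example (illustrative): for an argument of type { float, int } the two
// halves share one 8-byte slot. The f32 at offset 0 is promoted to %f0 and
// the i32 at offset 4 lands in the low half of %i0 (4 % 8 != 0, so no Custom
// bit); an i32 at an 8-byte-aligned offset would get the Custom bit and be
// placed in the high half of its register instead.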
197
198static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
199 CCValAssign::LocInfo &LocInfo,
200 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
201 return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
202 State);
203}
204
205static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
206 CCValAssign::LocInfo &LocInfo,
207 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
208 return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
209 State);
210}
211
212static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
213 CCValAssign::LocInfo &LocInfo,
214 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
215 return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
216 State);
217}
218
219static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
220 CCValAssign::LocInfo &LocInfo,
221 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
222 return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
223 State);
224}
225
226#include "SparcGenCallingConv.inc"
227
228// The calling conventions in SparcCallingConv.td are described in terms of the
229// callee's register window. This function translates registers to the
230// corresponding caller window %o register.
231static unsigned toCallerWindow(unsigned Reg) {
232 static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
233 "Unexpected enum");
234 if (Reg >= SP::I0 && Reg <= SP::I7)
235 return Reg - SP::I0 + SP::O0;
236 return Reg;
237}
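// For example, toCallerWindow(SP::I3) returns SP::O3, while registers outside
// %i0-%i7 (globals, floats, or already-%o registers) pass through unchanged.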
238
239bool SparcTargetLowering::CanLowerReturn(
240 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
241 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
242 const Type *RetTy) const {
243 SmallVector<CCValAssign, 16> RVLocs;
244 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
245 return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
246 : RetCC_Sparc32);
247}
248
249SDValue
250SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
251 bool IsVarArg,
252 const SmallVectorImpl<ISD::OutputArg> &Outs,
253 const SmallVectorImpl<SDValue> &OutVals,
254 const SDLoc &DL, SelectionDAG &DAG) const {
255 if (Subtarget->is64Bit())
256 return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
257 return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
258}
259
260SDValue
261SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
262 bool IsVarArg,
263 const SmallVectorImpl<ISD::OutputArg> &Outs,
264 const SmallVectorImpl<SDValue> &OutVals,
265 const SDLoc &DL, SelectionDAG &DAG) const {
266 MachineFunction &MF = DAG.getMachineFunction();
267
268 // CCValAssign - represent the assignment of the return value to locations.
269 SmallVector<CCValAssign, 16> RVLocs;
270
271 // CCState - Info about the registers and stack slot.
272 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
273 *DAG.getContext());
274
275 // Analyze return values.
276 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
277
278 SDValue Glue;
279 SmallVector<SDValue, 4> RetOps(1, Chain);
280 // Make room for the return address offset.
281 RetOps.push_back(SDValue());
282
283 // Copy the result values into the output registers.
284 for (unsigned i = 0, realRVLocIdx = 0;
285 i != RVLocs.size();
286 ++i, ++realRVLocIdx) {
287 CCValAssign &VA = RVLocs[i];
288 assert(VA.isRegLoc() && "Can only return in registers!");
289
290 SDValue Arg = OutVals[realRVLocIdx];
291
292 if (VA.needsCustom()) {
293 assert(VA.getLocVT() == MVT::v2i32);
294 // Legalize ret v2i32 -> ret 2 x i32 (Basically: do what would
295 // happen by default if this wasn't a legal type)
296
297 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
298 Arg,
299 DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
300 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
301 Arg,
302 DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
303
304 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Glue);
305 Glue = Chain.getValue(1);
306 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
307 VA = RVLocs[++i]; // skip ahead to next loc
308 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
309 Glue);
310 } else
311 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Glue);
312
313 // Guarantee that all emitted copies are stuck together with flags.
314 Glue = Chain.getValue(1);
315 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
316 }
317
318 unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
319 // If the function returns a struct, copy the SRetReturnReg to I0
320 if (MF.getFunction().hasStructRetAttr()) {
321 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
322 Register Reg = SFI->getSRetReturnReg();
323 if (!Reg)
324 llvm_unreachable("sret virtual register not created in the entry block");
325 auto PtrVT = getPointerTy(DAG.getDataLayout());
326 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
327 Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Glue);
328 Glue = Chain.getValue(1);
329 RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
330 RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
331 }
332
333 RetOps[0] = Chain; // Update chain.
334 RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
335
336 // Add the glue if we have it.
337 if (Glue.getNode())
338 RetOps.push_back(Glue);
339
340 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
341}
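// Illustrative pairing for the sret case above (standard SPARC V8 convention):
// the caller emits "call f; nop; unimp <size>", and the callee returns with
// "jmp %i7+12; restore" to hop over the unimp word, whereas an ordinary
// return is "jmp %i7+8; restore" (call instruction plus delay slot).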
342
343// Lower return values for the 64-bit ABI.
344// Return values are passed exactly the same way as function arguments.
345SDValue
346SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
347 bool IsVarArg,
348 const SmallVectorImpl<ISD::OutputArg> &Outs,
349 const SmallVectorImpl<SDValue> &OutVals,
350 const SDLoc &DL, SelectionDAG &DAG) const {
351 // CCValAssign - represent the assignment of the return value to locations.
352 SmallVector<CCValAssign, 16> RVLocs;
353
354 // CCState - Info about the registers and stack slot.
355 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
356 *DAG.getContext());
357
358 // Analyze return values.
359 CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
360
361 SDValue Glue;
362 SmallVector<SDValue, 4> RetOps(1, Chain);
363
364 // The second operand on the return instruction is the return address offset.
365 // The return address is always %i7+8 with the 64-bit ABI.
366 RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
367
368 // Copy the result values into the output registers.
369 for (unsigned i = 0; i != RVLocs.size(); ++i) {
370 CCValAssign &VA = RVLocs[i];
371 assert(VA.isRegLoc() && "Can only return in registers!");
372 SDValue OutVal = OutVals[i];
373
374 // Integer return values must be sign or zero extended by the callee.
375 switch (VA.getLocInfo()) {
376 case CCValAssign::Full: break;
377 case CCValAssign::SExt:
378 OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
379 break;
380 case CCValAssign::ZExt:
381 OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
382 break;
383 case CCValAssign::AExt:
384 OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
385 break;
386 default:
387 llvm_unreachable("Unknown loc info!");
388 }
389
390 // The custom bit on an i32 return value indicates that it should be passed
391 // in the high bits of the register.
392 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
393 OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
394 DAG.getConstant(32, DL, MVT::i32));
395
396 // The next value may go in the low bits of the same register.
397 // Handle both at once.
398 if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
399 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
400 OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
401 // Skip the next value, it's already done.
402 ++i;
403 }
404 }
405
406 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Glue);
407
408 // Guarantee that all emitted copies are stuck together with flags.
409 Glue = Chain.getValue(1);
410 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
411 }
412
413 RetOps[0] = Chain; // Update chain.
414
415 // Add the flag if we have it.
416 if (Glue.getNode())
417 RetOps.push_back(Glue);
418
419 return DAG.getNode(SPISD::RET_GLUE, DL, MVT::Other, RetOps);
420}
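// Example (illustrative): a function returning inreg { i32, i32 } packs both
// values into %i0 -- the first is shifted into bits 63..32 and the second is
// OR'ed into bits 31..0 -- mirroring the RetCC_Sparc64_Half assignments made
// during analysis.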
421
422SDValue SparcTargetLowering::LowerFormalArguments(
423 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
424 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
425 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
426 if (Subtarget->is64Bit())
427 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
428 DL, DAG, InVals);
429 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
430 DL, DAG, InVals);
431}
432
433/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
434/// passed in either one or two GPRs, including FP values. TODO: we should
435/// pass FP values in FP registers for fastcc functions.
436SDValue SparcTargetLowering::LowerFormalArguments_32(
437 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
438 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
439 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
440 MachineFunction &MF = DAG.getMachineFunction();
441 MachineRegisterInfo &RegInfo = MF.getRegInfo();
442 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
443
444 // Assign locations to all of the incoming arguments.
445 SmallVector<CCValAssign, 16> ArgLocs;
446 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
447 *DAG.getContext());
448 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
449
450 const unsigned StackOffset = 92;
451 bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
452
453 unsigned InIdx = 0;
454 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
455 CCValAssign &VA = ArgLocs[i];
456
457 if (Ins[InIdx].Flags.isSRet()) {
458 if (InIdx != 0)
459 report_fatal_error("sparc only supports sret on the first parameter");
460 // Get SRet from [%fp+64].
461 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
462 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
463 SDValue Arg =
464 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
465 InVals.push_back(Arg);
466 continue;
467 }
468
469 if (VA.isRegLoc()) {
470 if (VA.needsCustom()) {
471 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
472
473 Register VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
474 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
475 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
476
477 assert(i+1 < e);
478 CCValAssign &NextVA = ArgLocs[++i];
479
480 SDValue LoVal;
481 if (NextVA.isMemLoc()) {
482 int FrameIdx = MF.getFrameInfo().
483 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
484 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
485 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
486 } else {
487 Register loReg = MF.addLiveIn(NextVA.getLocReg(),
488 &SP::IntRegsRegClass);
489 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
490 }
491
492 if (IsLittleEndian)
493 std::swap(LoVal, HiVal);
494
495 SDValue WholeValue =
496 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
497 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
498 InVals.push_back(WholeValue);
499 continue;
500 }
501 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
502 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
503 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
504 if (VA.getLocVT() == MVT::f32)
505 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
506 else if (VA.getLocVT() != MVT::i32) {
507 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
508 DAG.getValueType(VA.getLocVT()));
509 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
510 }
511 InVals.push_back(Arg);
512 continue;
513 }
514
515 assert(VA.isMemLoc());
516
517 unsigned Offset = VA.getLocMemOffset()+StackOffset;
518 auto PtrVT = getPointerTy(DAG.getDataLayout());
519
520 if (VA.needsCustom()) {
521 assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
522 // If it is double-word aligned, just load.
523 if (Offset % 8 == 0) {
524 int FI = MF.getFrameInfo().CreateFixedObject(8,
525 Offset,
526 true);
527 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
528 SDValue Load =
529 DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
530 InVals.push_back(Load);
531 continue;
532 }
533
534 int FI = MF.getFrameInfo().CreateFixedObject(4,
535 Offset,
536 true);
537 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
538 SDValue HiVal =
539 DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
540 int FI2 = MF.getFrameInfo().CreateFixedObject(4,
541 Offset+4,
542 true);
543 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
544
545 SDValue LoVal =
546 DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
547
548 if (IsLittleEndian)
549 std::swap(LoVal, HiVal);
550
551 SDValue WholeValue =
552 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
553 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
554 InVals.push_back(WholeValue);
555 continue;
556 }
557
558 int FI = MF.getFrameInfo().CreateFixedObject(4,
559 Offset,
560 true);
561 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
562 SDValue Load;
563 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
564 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
565 } else if (VA.getValVT() == MVT::f128) {
566 report_fatal_error("SPARCv8 does not handle f128 in calls; "
567 "pass indirectly");
568 } else {
569 // We shouldn't see any other value types here.
570 llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
571 }
572 InVals.push_back(Load);
573 }
574
575 if (MF.getFunction().hasStructRetAttr()) {
576 // Copy the SRet Argument to SRetReturnReg.
577 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
578 Register Reg = SFI->getSRetReturnReg();
579 if (!Reg) {
580 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
581 SFI->setSRetReturnReg(Reg);
582 }
583 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
584 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
585 }
586
587 // Store remaining ArgRegs to the stack if this is a varargs function.
588 if (isVarArg) {
589 static const MCPhysReg ArgRegs[] = {
590 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
591 };
592 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
593 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
594 unsigned ArgOffset = CCInfo.getStackSize();
595 if (NumAllocated == 6)
596 ArgOffset += StackOffset;
597 else {
598 assert(!ArgOffset);
599 ArgOffset = 68+4*NumAllocated;
600 }
601
602 // Remember the vararg offset for the va_start implementation.
603 FuncInfo->setVarArgsFrameOffset(ArgOffset);
604
605 std::vector<SDValue> OutChains;
606
607 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
608 Register VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
609 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
610 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
611
612 int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
613 true);
614 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
615
616 OutChains.push_back(
617 DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
618 ArgOffset += 4;
619 }
620
621 if (!OutChains.empty()) {
622 OutChains.push_back(Chain);
623 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
624 }
625 }
626
627 return Chain;
628}
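// Illustrative frame math for the code above: the V8 fixed frame reserves
// 64 bytes for the register save area, 4 bytes at [%fp+64] for the sret
// pointer, and 6*4 bytes where %i0-%i5 can be spilled, so the first true
// stack argument lives at [%fp+92] -- hence StackOffset = 92, and varargs
// register dumping starts at 68 + 4*NumAllocated.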
629
630// Lower formal arguments for the 64-bit ABI.
631SDValue SparcTargetLowering::LowerFormalArguments_64(
632 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
633 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
634 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
635 MachineFunction &MF = DAG.getMachineFunction();
636
637 // Analyze arguments according to CC_Sparc64.
638 SmallVector<CCValAssign, 16> ArgLocs;
639 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
640 *DAG.getContext());
641 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
642
643 // The argument array begins at %fp+BIAS+128, after the register save area.
644 const unsigned ArgArea = 128;
645
646 for (const CCValAssign &VA : ArgLocs) {
647 if (VA.isRegLoc()) {
648 // This argument is passed in a register.
649 // All integer register arguments are promoted by the caller to i64.
650
651 // Create a virtual register for the promoted live-in value.
652 Register VReg = MF.addLiveIn(VA.getLocReg(),
653 getRegClassFor(VA.getLocVT()));
654 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
655
656 // Get the high bits for i32 struct elements.
657 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
658 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
659 DAG.getConstant(32, DL, MVT::i32));
660
661 // The caller promoted the argument, so insert an Assert?ext SDNode so we
662 // won't promote the value again in this function.
663 switch (VA.getLocInfo()) {
664 case CCValAssign::SExt:
665 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
666 DAG.getValueType(VA.getValVT()));
667 break;
668 case CCValAssign::ZExt:
669 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
670 DAG.getValueType(VA.getValVT()));
671 break;
672 default:
673 break;
674 }
675
676 // Truncate the register down to the argument type.
677 if (VA.isExtInLoc())
678 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
679
680 InVals.push_back(Arg);
681 continue;
682 }
683
684 // The registers are exhausted. This argument was passed on the stack.
685 assert(VA.isMemLoc());
686 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
687 // beginning of the arguments area at %fp+BIAS+128.
688 unsigned Offset = VA.getLocMemOffset() + ArgArea;
689 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
690 // Adjust offset for extended arguments, SPARC is big-endian.
691 // The caller will have written the full slot with extended bytes, but we
692 // prefer our own extending loads.
693 if (VA.isExtInLoc())
694 Offset += 8 - ValSize;
695 int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
696 InVals.push_back(
697 DAG.getLoad(VA.getValVT(), DL, Chain,
698 DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
699 MachinePointerInfo::getFixedStack(MF, FI)));
700 }
701
702 if (!IsVarArg)
703 return Chain;
704
705 // This function takes variable arguments, some of which may have been passed
706 // in registers %i0-%i5. Variable floating point arguments are never passed
707 // in floating point registers. They go on %i0-%i5 or on the stack like
708 // integer arguments.
709 //
710 // The va_start intrinsic needs to know the offset to the first variable
711 // argument.
712 unsigned ArgOffset = CCInfo.getStackSize();
713 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
714 // Skip the 128 bytes of register save area.
715 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
716 Subtarget->getStackPointerBias());
717
718 // Save the variable arguments that were passed in registers.
719 // The caller is required to reserve stack space for 6 arguments regardless
720 // of how many arguments were actually passed.
721 SmallVector<SDValue, 8> OutChains;
722 for (; ArgOffset < 6*8; ArgOffset += 8) {
723 Register VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
724 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
725 int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
726 auto PtrVT = getPointerTy(MF.getDataLayout());
727 OutChains.push_back(
728 DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
729 MachinePointerInfo::getFixedStack(MF, FI)));
730 }
731
732 if (!OutChains.empty())
733 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
734
735 return Chain;
736}
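// Example (illustrative): a varargs function with two fixed i64 parameters
// has CCInfo.getStackSize() == 16, so the loop above dumps %i2-%i5 to
// [%fp+BIAS+144] .. [%fp+BIAS+168] and va_start is pointed at the first of
// those slots (16 + 128 + the stack pointer bias).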
737
738// Check whether any of the argument registers are reserved
739static bool isAnyArgRegReserved(const SparcRegisterInfo *TRI,
740 const MachineFunction &MF) {
741 // The register window design means that outgoing parameters at O*
742 // will appear in the callee as I*.
743 // Be conservative and check both sides of the register names.
744 bool Outgoing =
745 llvm::any_of(SP::GPROutgoingArgRegClass, [TRI, &MF](MCPhysReg r) {
746 return TRI->isReservedReg(MF, r);
747 });
748 bool Incoming =
749 llvm::any_of(SP::GPRIncomingArgRegClass, [TRI, &MF](MCPhysReg r) {
750 return TRI->isReservedReg(MF, r);
751 });
752 return Outgoing || Incoming;
753}
754
755static void emitReservedArgRegCallError(const MachineFunction &MF) {
756 const Function &F = MF.getFunction();
757 F.getContext().diagnose(DiagnosticInfoUnsupported{
758 F, ("SPARC doesn't support"
759 " function calls if any of the argument registers is reserved.")});
760}
761
762SDValue
763SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
764 SmallVectorImpl<SDValue> &InVals) const {
765 if (Subtarget->is64Bit())
766 return LowerCall_64(CLI, InVals);
767 return LowerCall_32(CLI, InVals);
768}
769
770static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
771 const CallBase *Call) {
772 if (Call)
773 return Call->hasFnAttr(Attribute::ReturnsTwice);
774
775 const Function *CalleeFn = nullptr;
776 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
777 CalleeFn = dyn_cast<Function>(G->getGlobal());
778 } else if (ExternalSymbolSDNode *E =
779 dyn_cast<ExternalSymbolSDNode>(Callee)) {
780 const Function &Fn = DAG.getMachineFunction().getFunction();
781 const Module *M = Fn.getParent();
782 const char *CalleeName = E->getSymbol();
783 CalleeFn = M->getFunction(CalleeName);
784 }
785
786 if (!CalleeFn)
787 return false;
788 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
789}
790
791/// IsEligibleForTailCallOptimization - Check whether the call is eligible
792/// for tail call optimization.
793bool SparcTargetLowering::IsEligibleForTailCallOptimization(
794 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF) const {
795
796 auto &Outs = CLI.Outs;
797 auto &Caller = MF.getFunction();
798
799 // Do not tail call opt functions with "disable-tail-calls" attribute.
800 if (Caller.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
801 return false;
802
803 // Do not tail call opt if the stack is used to pass parameters.
804 // 64-bit targets have a slightly higher limit since the ABI requires
805 // to allocate some space even when all the parameters fit inside registers.
806 unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
807 if (CCInfo.getStackSize() > StackSizeLimit)
808 return false;
809
810 // Do not tail call opt if either the callee or caller returns
811 // a struct and the other does not.
812 if (!Outs.empty() && Caller.hasStructRetAttr() != Outs[0].Flags.isSRet())
813 return false;
814
815 // Byval parameters hand the function a pointer directly into the stack area
816 // we want to reuse during a tail call.
817 for (auto &Arg : Outs)
818 if (Arg.Flags.isByVal())
819 return false;
820
821 return true;
822}
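// Example (illustrative): on V9 the 48-byte limit corresponds to the six
// 8-byte argument slots CC_Sparc64 always reserves, so a call such as
// f(a, b) with two i64 arguments (getStackSize() == 16) stays eligible,
// while a seventh argument word pushes the size past 48 and disables the
// tail call; any byval argument disables it outright.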
823
824// Lower a call for the 32-bit ABI.
825SDValue
826SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
827 SmallVectorImpl<SDValue> &InVals) const {
828 SelectionDAG &DAG = CLI.DAG;
829 SDLoc &dl = CLI.DL;
830 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
831 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
832 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
833 SDValue Chain = CLI.Chain;
834 SDValue Callee = CLI.Callee;
835 bool &isTailCall = CLI.IsTailCall;
836 CallingConv::ID CallConv = CLI.CallConv;
837 bool isVarArg = CLI.IsVarArg;
838 MachineFunction &MF = DAG.getMachineFunction();
839
840 // Analyze operands of the call, assigning locations to each operand.
841 SmallVector<CCValAssign, 16> ArgLocs;
842 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
843 *DAG.getContext());
844 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
845
846 isTailCall = isTailCall && IsEligibleForTailCallOptimization(
847 CCInfo, CLI, DAG.getMachineFunction());
848
849 // Get the size of the outgoing arguments stack space requirement.
850 unsigned ArgsSize = CCInfo.getStackSize();
851
852 // Keep stack frames 8-byte aligned.
853 ArgsSize = (ArgsSize+7) & ~7;
854
855 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
856
857 // Create local copies for byval args.
858 SmallVector<SDValue, 8> ByValArgs;
859 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
860 ISD::ArgFlagsTy Flags = Outs[i].Flags;
861 if (!Flags.isByVal())
862 continue;
863
864 SDValue Arg = OutVals[i];
865 unsigned Size = Flags.getByValSize();
866 Align Alignment = Flags.getNonZeroByValAlign();
867
868 if (Size > 0U) {
869 int FI = MFI.CreateStackObject(Size, Alignment, false);
870 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
871 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
872
873 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Alignment,
874 false, // isVolatile,
875 (Size <= 32), // AlwaysInline if size <= 32,
876 /*CI=*/nullptr, std::nullopt, MachinePointerInfo(),
877 MachinePointerInfo());
878 ByValArgs.push_back(FIPtr);
879 }
880 else {
881 SDValue nullVal;
882 ByValArgs.push_back(nullVal);
883 }
884 }
885
886 assert(!isTailCall || ArgsSize == 0);
887
888 if (!isTailCall)
889 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
890
891 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
892 SmallVector<SDValue, 8> MemOpChains;
893
894 const unsigned StackOffset = 92;
895 bool hasStructRetAttr = false;
896 unsigned SRetArgSize = 0;
897 // Walk the register/memloc assignments, inserting copies/loads.
898 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
899 i != e;
900 ++i, ++realArgIdx) {
901 CCValAssign &VA = ArgLocs[i];
902 SDValue Arg = OutVals[realArgIdx];
903
904 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
905
906 // Use local copy if it is a byval arg.
907 if (Flags.isByVal()) {
908 Arg = ByValArgs[byvalArgIdx++];
909 if (!Arg) {
910 continue;
911 }
912 }
913
914 // Promote the value if needed.
915 switch (VA.getLocInfo()) {
916 default: llvm_unreachable("Unknown loc info!");
917 case CCValAssign::Full: break;
918 case CCValAssign::SExt:
919 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
920 break;
921 case CCValAssign::ZExt:
922 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
923 break;
924 case CCValAssign::AExt:
925 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
926 break;
927 case CCValAssign::BCvt:
928 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
929 break;
930 }
931
932 if (Flags.isSRet()) {
933 assert(VA.needsCustom());
934
935 if (isTailCall)
936 continue;
937
938 // store SRet argument in %sp+64
939 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
940 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
941 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
942 MemOpChains.push_back(
943 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
944 hasStructRetAttr = true;
945 // sret only allowed on first argument
946 assert(Outs[realArgIdx].OrigArgIndex == 0);
947 SRetArgSize =
948 DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
949 continue;
950 }
951
952 if (VA.needsCustom()) {
953 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
954
955 if (VA.isMemLoc()) {
956 unsigned Offset = VA.getLocMemOffset() + StackOffset;
957 // if it is double-word aligned, just store.
958 if (Offset % 8 == 0) {
959 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
960 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
961 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
962 MemOpChains.push_back(
963 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
964 continue;
965 }
966 }
967
968 if (VA.getLocVT() == MVT::f64) {
969 // Move the float value from the float registers into the
970 // integer registers.
971 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Arg))
972 Arg = bitcastConstantFPToInt(C, dl, DAG);
973 else
974 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
975 }
976
977 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
978 Arg,
979 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
980 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
981 Arg,
982 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
983
984 if (VA.isRegLoc()) {
985 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
986 assert(i+1 != e);
987 CCValAssign &NextVA = ArgLocs[++i];
988 if (NextVA.isRegLoc()) {
989 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
990 } else {
991 // Store the second part in stack.
992 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
993 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
994 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
995 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
996 MemOpChains.push_back(
997 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
998 }
999 } else {
1000 unsigned Offset = VA.getLocMemOffset() + StackOffset;
1001 // Store the first part.
1002 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1003 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
1004 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1005 MemOpChains.push_back(
1006 DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
1007 // Store the second part.
1008 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
1009 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1010 MemOpChains.push_back(
1011 DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
1012 }
1013 continue;
1014 }
1015
1016 // Arguments that can be passed on register must be kept at
1017 // RegsToPass vector
1018 if (VA.isRegLoc()) {
1019 if (VA.getLocVT() != MVT::f32) {
1020 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1021 continue;
1022 }
1023 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
1024 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1025 continue;
1026 }
1027
1028 assert(VA.isMemLoc());
1029
1030 // Create a store off the stack pointer for this argument.
1031 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
1032 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
1033 dl);
1034 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
1035 MemOpChains.push_back(
1036 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
1037 }
1038
1039
1040 // Emit all stores, make sure they occur before any copies into physregs.
1041 if (!MemOpChains.empty())
1042 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1043
1044 // Build a sequence of copy-to-reg nodes chained together with token
1045 // chain and flag operands which copy the outgoing args into registers.
1046 // The InGlue is necessary since all emitted instructions must be
1047 // stuck together.
1048 SDValue InGlue;
1049 for (const auto &[OrigReg, N] : RegsToPass) {
1050 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1051 Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
1052 InGlue = Chain.getValue(1);
1053 }
1054
1055 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1056
1057 // If the callee is a GlobalAddress node (quite common, every direct call is)
1058 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1059 // Likewise ExternalSymbol -> TargetExternalSymbol.
1060 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1061 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0);
1062 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1063 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
1064
1065 // Returns a chain & a flag for retval copy to use
1066 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1067 SmallVector<SDValue, 8> Ops;
1068 Ops.push_back(Chain);
1069 Ops.push_back(Callee);
1070 if (hasStructRetAttr)
1071 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
1072 for (const auto &[OrigReg, N] : RegsToPass) {
1073 Register Reg = isTailCall ? OrigReg : toCallerWindow(OrigReg);
1074 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1075 }
1076
1077 // Add a register mask operand representing the call-preserved registers.
1078 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1079 const uint32_t *Mask =
1080 ((hasReturnsTwice)
1081 ? TRI->getRTCallPreservedMask(CallConv)
1082 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
1083
1084 if (isAnyArgRegReserved(TRI, MF))
1085 emitReservedArgRegCallError(MF);
1086
1087 assert(Mask && "Missing call preserved mask for calling convention");
1088 Ops.push_back(DAG.getRegisterMask(Mask));
1089
1090 if (InGlue.getNode())
1091 Ops.push_back(InGlue);
1092
1093 if (isTailCall) {
1094 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1095 return DAG.getNode(SPISD::TAIL_CALL, dl, MVT::Other, Ops);
1096 }
1097
1098 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
1099 InGlue = Chain.getValue(1);
1100
1101 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, dl);
1102 InGlue = Chain.getValue(1);
1103
1104 // Assign locations to each value returned by this call.
1105 SmallVector<CCValAssign, 16> RVLocs;
1106 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1107 *DAG.getContext());
1108
1109 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
1110
1111 // Copy all of the result registers out of their specified physreg.
1112 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1113 assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
1114 if (RVLocs[i].getLocVT() == MVT::v2i32) {
1115 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
1116 SDValue Lo = DAG.getCopyFromReg(
1117 Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InGlue);
1118 Chain = Lo.getValue(1);
1119 InGlue = Lo.getValue(2);
1120 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
1121 DAG.getConstant(0, dl, MVT::i32));
1122 SDValue Hi = DAG.getCopyFromReg(
1123 Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InGlue);
1124 Chain = Hi.getValue(1);
1125 InGlue = Hi.getValue(2);
1126 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
1127 DAG.getConstant(1, dl, MVT::i32));
1128 InVals.push_back(Vec);
1129 } else {
1130 Chain =
1131 DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
1132 RVLocs[i].getValVT(), InGlue)
1133 .getValue(1);
1134 InGlue = Chain.getValue(2);
1135 InVals.push_back(Chain.getValue(0));
1136 }
1137 }
1138
1139 return Chain;
1140}
1141
1142// FIXME? Maybe this could be a TableGen attribute on some registers and
1143// this table could be generated automatically from RegInfo.
1144Register SparcTargetLowering::getRegisterByName(const char *RegName, LLT VT,
1145 const MachineFunction &MF) const {
1146 Register Reg = StringSwitch<Register>(RegName)
1147 .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
1148 .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
1149 .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
1150 .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
1151 .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
1152 .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
1153 .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
1154 .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
1155 .Default(0);
1156
1157 // If we're directly referencing register names
1158 // (e.g in GCC C extension `register int r asm("g1");`),
1159 // make sure that said register is in the reserve list.
1160 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1161 if (!TRI->isReservedReg(MF, Reg))
1162 Reg = Register();
1163
1164 return Reg;
1165}
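// Usage example (illustrative): with the GCC extension
//   register int r asm("g1");
// reads of r become llvm.read_register calls that resolve here to SP::G1,
// but only when %g1 is actually reserved for the function; otherwise the
// null register is returned and lowering reports an error.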
1166
1167// Fixup floating point arguments in the ... part of a varargs call.
1168//
1169// The SPARC v9 ABI requires that floating point arguments are treated the same
1170// as integers when calling a varargs function. This does not apply to the
1171// fixed arguments that are part of the function's prototype.
1172//
1173// This function post-processes a CCValAssign array created by
1174// AnalyzeCallOperands().
1175static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
1176 ArrayRef<ISD::OutputArg> Outs) {
1177 for (CCValAssign &VA : ArgLocs) {
1178 MVT ValTy = VA.getLocVT();
1179 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1180 // varargs functions.
1181 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1182 continue;
1183 // The fixed arguments to a varargs function still go in FP registers.
1184 if (!Outs[VA.getValNo()].Flags.isVarArg())
1185 continue;
1186
1187 // This floating point argument should be reassigned.
1188 // Determine the offset into the argument array.
1189 Register firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1190 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1191 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1192 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1193
1194 if (Offset < 6*8) {
1195 // This argument should go in %i0-%i5.
1196 unsigned IReg = SP::I0 + Offset/8;
1197 if (ValTy == MVT::f64)
1198 // Full register, just bitconvert into i64.
1199 VA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), IReg, MVT::i64,
1200 CCValAssign::BCvt);
1201 else {
1202 assert(ValTy == MVT::f128 && "Unexpected type!");
1203 // Full register, just bitconvert into i128 -- We will lower this into
1204 // two i64s in LowerCall_64.
1205 VA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(), IReg,
1206 MVT::i128, CCValAssign::BCvt);
1207 }
1208 } else {
1209 // This needs to go to memory, we're out of integer registers.
1210 VA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(), Offset,
1211 VA.getLocVT(), VA.getLocInfo());
1212 }
1213 }
1214}
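// Worked example (illustrative): in printf("%f\n", x) the double x is a
// variadic operand. CC_Sparc64 first assigns it to D1 (stack offset 8), and
// this fixup moves it to the integer register %i1 (offset 8 / 8) as an i64
// BCvt, which is where a varargs callee expects to reload it.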
1215
1216// Lower a call for the 64-bit ABI.
1217SDValue
1218SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
1219 SmallVectorImpl<SDValue> &InVals) const {
1220 SelectionDAG &DAG = CLI.DAG;
1221 SDLoc DL = CLI.DL;
1222 SDValue Chain = CLI.Chain;
1223 auto PtrVT = getPointerTy(DAG.getDataLayout());
1224 MachineFunction &MF = DAG.getMachineFunction();
1225
1226 // Analyze operands of the call, assigning locations to each operand.
1227 SmallVector<CCValAssign, 16> ArgLocs;
1228 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1229 *DAG.getContext());
1230 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1231
1232 CLI.IsTailCall = CLI.IsTailCall && IsEligibleForTailCallOptimization(
1233 CCInfo, CLI, DAG.getMachineFunction());
1234
1235 // Get the size of the outgoing arguments stack space requirement.
1236 // The stack offset computed by CC_Sparc64 includes all arguments.
1237 // Called functions expect 6 argument words to exist in the stack frame, used
1238 // or not.
1239 unsigned StackReserved = 6 * 8u;
1240 unsigned ArgsSize = std::max<unsigned>(StackReserved, CCInfo.getStackSize());
1241
1242 // Keep stack frames 16-byte aligned.
1243 ArgsSize = alignTo(ArgsSize, 16);
1244
1245 // Varargs calls require special treatment.
1246 if (CLI.IsVarArg)
1247 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1248
1249 assert(!CLI.IsTailCall || ArgsSize == StackReserved);
1250
1251 // Adjust the stack pointer to make room for the arguments.
1252 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1253 // with more than 6 arguments.
1254 if (!CLI.IsTailCall)
1255 Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
1256
1257 // Collect the set of registers to pass to the function and their values.
1258 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1259 // instruction.
1260 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
1261
1262 // Collect chains from all the memory operations that copy arguments to the
1263 // stack. They must follow the stack pointer adjustment above and precede the
1264 // call instruction itself.
1265 SmallVector<SDValue, 8> MemOpChains;
1266
1267 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1268 const CCValAssign &VA = ArgLocs[i];
1269 SDValue Arg = CLI.OutVals[i];
1270
1271 // Promote the value if needed.
1272 switch (VA.getLocInfo()) {
1273 default:
1274 llvm_unreachable("Unknown location info!");
1275 case CCValAssign::Full:
1276 break;
1277 case CCValAssign::SExt:
1278 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1279 break;
1280 case CCValAssign::ZExt:
1281 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1282 break;
1283 case CCValAssign::AExt:
1284 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1285 break;
1286 case CCValAssign::BCvt:
1287 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1288 // SPARC does not support i128 natively. Lower it into two i64, see below.
1289 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1290 || VA.getLocVT() != MVT::i128)
1291 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1292 break;
1293 }
1294
1295 if (VA.isRegLoc()) {
1296 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1297 && VA.getLocVT() == MVT::i128) {
1298 // Store and reload into the integer register reg and reg+1.
1299 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1300 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1301 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1302 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1303 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1304 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1305 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1306
1307 // Store to %sp+BIAS+128+Offset
1308 SDValue Store =
1309 DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
1310 // Load into Reg and Reg+1
1311 SDValue Hi64 =
1312 DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
1313 SDValue Lo64 =
1314 DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
1315
1316 Register HiReg = VA.getLocReg();
1317 Register LoReg = VA.getLocReg() + 1;
1318 if (!CLI.IsTailCall) {
1319 HiReg = toCallerWindow(HiReg);
1320 LoReg = toCallerWindow(LoReg);
1321 }
1322
1323 RegsToPass.push_back(std::make_pair(HiReg, Hi64));
1324 RegsToPass.push_back(std::make_pair(LoReg, Lo64));
1325 continue;
1326 }
1327
1328 // The custom bit on an i32 return value indicates that it should be
1329 // passed in the high bits of the register.
1330 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1331 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1332 DAG.getConstant(32, DL, MVT::i32));
1333
1334 // The next value may go in the low bits of the same register.
1335 // Handle both at once.
1336 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1337 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1338 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1339 CLI.OutVals[i+1]);
1340 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1341 // Skip the next value, it's already done.
1342 ++i;
1343 }
1344 }
1345
1346 Register Reg = VA.getLocReg();
1347 if (!CLI.IsTailCall)
1348 Reg = toCallerWindow(Reg);
1349 RegsToPass.push_back(std::make_pair(Reg, Arg));
1350 continue;
1351 }
1352
1353 assert(VA.isMemLoc());
1354
1355 // Create a store off the stack pointer for this argument.
1356 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1357 // The argument area starts at %fp+BIAS+128 in the callee frame,
1358 // %sp+BIAS+128 in ours.
1359 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1360 Subtarget->getStackPointerBias() +
1361 128, DL);
1362 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1363 MemOpChains.push_back(
1364 DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
1365 }
1366
1367 // Emit all stores, make sure they occur before the call.
1368 if (!MemOpChains.empty())
1369 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1370
1371 // Build a sequence of CopyToReg nodes glued together with token chain and
1372 // glue operands which copy the outgoing args into registers. The InGlue is
1373 // necessary since all emitted instructions must be stuck together in order
1374 // to pass the live physical registers.
1375 SDValue InGlue;
1376 for (const auto &[Reg, N] : RegsToPass) {
1377 Chain = DAG.getCopyToReg(Chain, DL, Reg, N, InGlue);
1378 InGlue = Chain.getValue(1);
1379 }
1380
1381 // If the callee is a GlobalAddress node (quite common, every direct call is)
1382 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1383 // Likewise ExternalSymbol -> TargetExternalSymbol.
1384 SDValue Callee = CLI.Callee;
1385 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CB);
1386 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1387 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0);
1388 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1389 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1390
1391 // Build the operands for the call instruction itself.
1392 SmallVector<SDValue, 8> Ops;
1393 Ops.push_back(Chain);
1394 Ops.push_back(Callee);
1395 for (const auto &[Reg, N] : RegsToPass)
1396 Ops.push_back(DAG.getRegister(Reg, N.getValueType()));
1397
1398 // Add a register mask operand representing the call-preserved registers.
1399 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1400 const uint32_t *Mask =
1401 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1402 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1403 CLI.CallConv));
1404
1405 if (isAnyArgRegReserved(TRI, MF))
1406 emitReservedArgRegCallError(MF);
1407
1408 assert(Mask && "Missing call preserved mask for calling convention");
1409 Ops.push_back(DAG.getRegisterMask(Mask));
1410
1411 // Make sure the CopyToReg nodes are glued to the call instruction which
1412 // consumes the registers.
1413 if (InGlue.getNode())
1414 Ops.push_back(InGlue);
1415
1416 // Now the call itself.
1417 if (CLI.IsTailCall) {
1418 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
1419 return DAG.getNode(SPISD::TAIL_CALL, DL, MVT::Other, Ops);
1420 }
1421 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1422 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1423 InGlue = Chain.getValue(1);
1424
1425 // Revert the stack pointer immediately after the call.
1426 Chain = DAG.getCALLSEQ_END(Chain, ArgsSize, 0, InGlue, DL);
1427 InGlue = Chain.getValue(1);
1428
1429 // Now extract the return values. This is more or less the same as
1430 // LowerFormalArguments_64.
1431
1432 // Assign locations to each value returned by this call.
1433 SmallVector<CCValAssign, 16> RVLocs;
1434 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1435 *DAG.getContext());
1436
1437 // Set inreg flag manually for codegen generated library calls that
1438 // return float.
1439 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CB)
1440 CLI.Ins[0].Flags.setInReg();
1441
1442 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1443
1444 // Copy all of the result registers out of their specified physreg.
1445 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1446 CCValAssign &VA = RVLocs[i];
1447 assert(VA.isRegLoc() && "Can only return in registers!");
1448 unsigned Reg = toCallerWindow(VA.getLocReg());
1449
1450 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1451 // reside in the same register in the high and low bits. Reuse the
1452 // CopyFromReg previous node to avoid duplicate copies.
1453 SDValue RV;
1454 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1455 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1456 RV = Chain.getValue(0);
1457
1458 // But usually we'll create a new CopyFromReg for a different register.
1459 if (!RV.getNode()) {
1460 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1461 Chain = RV.getValue(1);
1462 InGlue = Chain.getValue(2);
1463 }
1464
1465 // Get the high bits for i32 struct elements.
1466 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1467 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1468 DAG.getConstant(32, DL, MVT::i32));
1469
1470 // The callee promoted the return value, so insert an Assert?ext SDNode so
1471 // we won't promote the value again in this function.
1472 switch (VA.getLocInfo()) {
1473 case CCValAssign::SExt:
1474 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1475 DAG.getValueType(VA.getValVT()));
1476 break;
1477 case CCValAssign::ZExt:
1478 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1479 DAG.getValueType(VA.getValVT()));
1480 break;
1481 default:
1482 break;
1483 }
1484
1485 // Truncate the register down to the return value type.
1486 if (VA.isExtInLoc())
1487 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1488
1489 InVals.push_back(RV);
1490 }
1491
1492 return Chain;
1493}
1494
1495//===----------------------------------------------------------------------===//
1496// TargetLowering Implementation
1497//===----------------------------------------------------------------------===//
1498
1499TargetLowering::AtomicExpansionKind
1500SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
1501 if (AI->getOperation() == AtomicRMWInst::Xchg &&
1502 AI->getType()->getPrimitiveSizeInBits() == 32)
1503 return AtomicExpansionKind::None; // Uses xchg instruction
1504 return AtomicExpansionKind::CmpXChg;
1505}
1506
1507/// intCondCCodeToRcond - Convert a DAG integer condition code to a SPARC
1508/// rcond condition.
1509static SPCC::CondCodes intCondCCodeToRcond(ISD::CondCode CC) {
1510 switch (CC) {
1511 default:
1512 llvm_unreachable("Unknown/unsigned integer condition code!");
1513 case ISD::SETEQ:
1514 return SPCC::REG_Z;
1515 case ISD::SETNE:
1516 return SPCC::REG_NZ;
1517 case ISD::SETLT:
1518 return SPCC::REG_LZ;
1519 case ISD::SETGT:
1520 return SPCC::REG_GZ;
1521 case ISD::SETLE:
1522 return SPCC::REG_LEZ;
1523 case ISD::SETGE:
1524 return SPCC::REG_GEZ;
1525 }
1526}
1527
1528/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
1529/// condition.
1530static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1531 switch (CC) {
1532 default: llvm_unreachable("Unknown integer condition code!");
1533 case ISD::SETEQ: return SPCC::ICC_E;
1534 case ISD::SETNE: return SPCC::ICC_NE;
1535 case ISD::SETLT: return SPCC::ICC_L;
1536 case ISD::SETGT: return SPCC::ICC_G;
1537 case ISD::SETLE: return SPCC::ICC_LE;
1538 case ISD::SETGE: return SPCC::ICC_GE;
1539 case ISD::SETULT: return SPCC::ICC_CS;
1540 case ISD::SETULE: return SPCC::ICC_LEU;
1541 case ISD::SETUGT: return SPCC::ICC_GU;
1542 case ISD::SETUGE: return SPCC::ICC_CC;
1543 }
1544}
1545
1546/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
1547/// FCC condition.
1548static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1549 switch (CC) {
1550 default: llvm_unreachable("Unknown fp condition code!");
1551 case ISD::SETEQ:
1552 case ISD::SETOEQ: return SPCC::FCC_E;
1553 case ISD::SETNE:
1554 case ISD::SETUNE: return SPCC::FCC_NE;
1555 case ISD::SETLT:
1556 case ISD::SETOLT: return SPCC::FCC_L;
1557 case ISD::SETGT:
1558 case ISD::SETOGT: return SPCC::FCC_G;
1559 case ISD::SETLE:
1560 case ISD::SETOLE: return SPCC::FCC_LE;
1561 case ISD::SETGE:
1562 case ISD::SETOGE: return SPCC::FCC_GE;
1563 case ISD::SETULT: return SPCC::FCC_UL;
1564 case ISD::SETULE: return SPCC::FCC_ULE;
1565 case ISD::SETUGT: return SPCC::FCC_UG;
1566 case ISD::SETUGE: return SPCC::FCC_UGE;
1567 case ISD::SETUO: return SPCC::FCC_U;
1568 case ISD::SETO: return SPCC::FCC_O;
1569 case ISD::SETONE: return SPCC::FCC_LG;
1570 case ISD::SETUEQ: return SPCC::FCC_UE;
1571 }
1572}
1573
1574SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
1575 const SparcSubtarget &STI)
1576 : TargetLowering(TM), Subtarget(&STI) {
1577 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
1578
1579 // Instructions which use registers as conditionals examine all the
1580 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1581 // matters much whether it's ZeroOrOneBooleanContent, or
1582 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1583 // former.
1584 setBooleanContents(ZeroOrOneBooleanContent);
1585 setBooleanVectorContents(ZeroOrOneBooleanContent);
1586
1587 // Set up the register classes.
1588 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1589 if (!Subtarget->useSoftFloat()) {
1590 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1591 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1592 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1593 }
1594 if (Subtarget->is64Bit()) {
1595 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1596 } else {
1597 // On 32bit sparc, we define a double-register 32bit register
1598 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1599 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1600
1601 // ...but almost all operations must be expanded, so set that as
1602 // the default.
1603 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1604 setOperationAction(Op, MVT::v2i32, Expand);
1605 }
1606 // Truncating/extending stores/loads are also not supported.
1607 for (MVT VT : MVT::integer_valuetypes()) {
1608 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1609 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1610 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1611
1612 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1613 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1614 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1615
1616 setTruncStoreAction(VT, MVT::v2i32, Expand);
1617 setTruncStoreAction(MVT::v2i32, VT, Expand);
1618 }
1619 // However, load and store *are* legal.
1620 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1621 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1622 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
1623 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
1624
1625 // And we need to promote i64 loads/stores into vector load/store
1626 setOperationAction(ISD::LOAD, MVT::i64, Custom);
1627 setOperationAction(ISD::STORE, MVT::i64, Custom);
1628
1629 // Sadly, this doesn't work:
1630 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1631 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1632 }
1633
1634 // Turn FP extload into load/fpextend
1635 for (MVT VT : MVT::fp_valuetypes()) {
1636 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
1637 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1638 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1639 }
1640
1641 // Sparc doesn't have i1 sign extending load
1642 for (MVT VT : MVT::integer_valuetypes())
1643 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1644
1645 // Turn FP truncstore into trunc + store.
1646 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
1647 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
1648 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1649 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
1650 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1651 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1652
1653 // Custom legalize GlobalAddress nodes into LO/HI parts.
1654 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
1655 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
1656 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
1657 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
1658
1659 // Sparc doesn't have sext_inreg; replace it with shl/sra.
1660 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1661 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1662 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1663
1664 // Sparc has no REM or DIVREM operations.
1665 setOperationAction(ISD::UREM, MVT::i32, Expand);
1666 setOperationAction(ISD::SREM, MVT::i32, Expand);
1667 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1668 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1669
1670 // ... nor does SparcV9.
1671 if (Subtarget->is64Bit()) {
1672 setOperationAction(ISD::UREM, MVT::i64, Expand);
1673 setOperationAction(ISD::SREM, MVT::i64, Expand);
1674 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1675 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1676 }
1677
1678 // Custom expand fp<->sint.
1679 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1680 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1681 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1682 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1683
1684 // Custom expand fp<->uint.
1685 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1686 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1687 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1688 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1689
1690 // Lower f16 conversion operations into library calls
1691 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1692 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1693 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1694 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1695 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
1696 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
1697
1698 setOperationAction(ISD::BITCAST, MVT::f32,
1699 Subtarget->isVIS3() ? Legal : Expand);
1700 setOperationAction(ISD::BITCAST, MVT::i32,
1701 Subtarget->isVIS3() ? Legal : Expand);
1702
1703 // Sparc has no select or setcc: expand to SELECT_CC.
1704 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1705 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1706 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1707 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1708
1709 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1710 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1711 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1712 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1713
1714 // Sparc doesn't have BRCOND either, it has BR_CC.
1715 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1716 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1717 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1718 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1719 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1720 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1721 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1722
1723 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1724 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1725 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1726 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1727
1732
1733 if (Subtarget->isVIS3()) {
1736 }
1737
1738 if (Subtarget->is64Bit()) {
1739 setOperationAction(ISD::BITCAST, MVT::f64,
1740 Subtarget->isVIS3() ? Legal : Expand);
1741 setOperationAction(ISD::BITCAST, MVT::i64,
1742 Subtarget->isVIS3() ? Legal : Expand);
1745 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1747
1748 setOperationAction(ISD::CTPOP, MVT::i64,
1749 Subtarget->usePopc() ? Legal : Expand);
1751 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1752 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1753 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
1754 }
1755
1756 // ATOMICs.
1757 // Atomics are supported on SparcV9. 32-bit atomics are also
1758 // supported by some Leon SparcV8 variants. Otherwise, atomics
1759 // are unsupported.
1760 if (Subtarget->isV9()) {
1761 // TODO: we _ought_ to be able to support 64-bit atomics on 32-bit sparcv9,
1762 // but it hasn't been implemented in the backend yet.
1763 if (Subtarget->is64Bit())
1764 setMaxAtomicSizeInBitsSupported(64);
1765 else
1766 setMaxAtomicSizeInBitsSupported(32);
1767 } else if (Subtarget->hasLeonCasa())
1768 setMaxAtomicSizeInBitsSupported(32);
1769 else
1770 setMaxAtomicSizeInBitsSupported(0);
1771
1772 setMinCmpXchgSizeInBits(32);
1773
1774 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1775
1776 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1777
1778 // Custom lower atomic LOAD/STORE.
1779 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1780 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1781
1782 if (Subtarget->is64Bit()) {
1783 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1784 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1785 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1786 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1787 }
1788
1789 if (!Subtarget->isV9()) {
1790 // SparcV8 does not have FNEGD and FABSD.
1791 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1792 setOperationAction(ISD::FABS, MVT::f64, Custom);
1793 }
1794
1795 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1796 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1797 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1798 setOperationAction(ISD::FREM , MVT::f128, Expand);
1799 setOperationAction(ISD::FMA , MVT::f128, Expand);
1800 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1801 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1802 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1803 setOperationAction(ISD::FREM , MVT::f64, Expand);
1804 setOperationAction(ISD::FMA, MVT::f64,
1805 Subtarget->isUA2007() ? Legal : Expand);
1806 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1807 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1808 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1809 setOperationAction(ISD::FREM , MVT::f32, Expand);
1810 setOperationAction(ISD::FMA, MVT::f32,
1811 Subtarget->isUA2007() ? Legal : Expand);
1812 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1813 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1818 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1819 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1820 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1821
1825
1826 // Expands to [SU]MUL_LOHI.
1830
1831 if (Subtarget->useSoftMulDiv()) {
1832 // .umul works for both signed and unsigned
1837 }
1838
1839 if (Subtarget->is64Bit()) {
1843 Subtarget->isVIS3() ? Legal : Expand);
1845 Subtarget->isVIS3() ? Legal : Expand);
1846
1850 }
1851
1852 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1853 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1854 // VAARG needs to be lowered to not do unaligned accesses for doubles.
1855 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1856
1857 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1858 setOperationAction(ISD::DEBUGTRAP , MVT::Other, Legal);
1859
1860 // Use the default implementation.
1861 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1862 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1863 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1864 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1865 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1866
1867 setStackPointerRegisterToSaveRestore(SP::O6);
1868
1869 setOperationAction(ISD::CTPOP, MVT::i32,
1870 Subtarget->usePopc() ? Legal : Expand);
1871
1872 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1873 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1874 setOperationAction(ISD::STORE, MVT::f128, Legal);
1875 } else {
1876 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1877 setOperationAction(ISD::STORE, MVT::f128, Custom);
1878 }
1879
1880 if (Subtarget->hasHardQuad()) {
1881 setOperationAction(ISD::FADD, MVT::f128, Legal);
1882 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1883 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1884 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1885 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1886 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1887 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1888 if (Subtarget->isV9()) {
1889 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1890 setOperationAction(ISD::FABS, MVT::f128, Legal);
1891 } else {
1892 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1893 setOperationAction(ISD::FABS, MVT::f128, Custom);
1894 }
1895 } else {
1896 // Custom legalize f128 operations.
1897
1898 setOperationAction(ISD::FADD, MVT::f128, Custom);
1899 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1900 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1901 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1902 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1903 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1904 setOperationAction(ISD::FABS, MVT::f128, Custom);
1905
1906 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1907 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1908 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1909
1910 // Set up runtime library names.
1911 if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
1912 setLibcallImpl(RTLIB::ADD_F128, RTLIB::impl__Qp_add);
1913 setLibcallImpl(RTLIB::SUB_F128, RTLIB::impl__Qp_sub);
1914 setLibcallImpl(RTLIB::MUL_F128, RTLIB::impl__Qp_mul);
1915 setLibcallImpl(RTLIB::DIV_F128, RTLIB::impl__Qp_div);
1916 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl__Qp_sqrt);
1917 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::impl__Qp_qtoi);
1918 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::impl__Qp_qtoui);
1919 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::impl__Qp_itoq);
1920 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::impl__Qp_uitoq);
1921 setLibcallImpl(RTLIB::FPTOSINT_F128_I64, RTLIB::impl__Qp_qtox);
1922 setLibcallImpl(RTLIB::FPTOUINT_F128_I64, RTLIB::impl__Qp_qtoux);
1923 setLibcallImpl(RTLIB::SINTTOFP_I64_F128, RTLIB::impl__Qp_xtoq);
1924 setLibcallImpl(RTLIB::UINTTOFP_I64_F128, RTLIB::impl__Qp_uxtoq);
1925 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::impl__Qp_stoq);
1926 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::impl__Qp_dtoq);
1927 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::impl__Qp_qtos);
1928 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::impl__Qp_qtod);
1929 } else if (!Subtarget->useSoftFloat()) {
1930 setLibcallImpl(RTLIB::ADD_F128, RTLIB::impl__Q_add);
1931 setLibcallImpl(RTLIB::SUB_F128, RTLIB::impl__Q_sub);
1932 setLibcallImpl(RTLIB::MUL_F128, RTLIB::impl__Q_mul);
1933 setLibcallImpl(RTLIB::DIV_F128, RTLIB::impl__Q_div);
1934 setLibcallImpl(RTLIB::SQRT_F128, RTLIB::impl__Q_sqrt);
1935 setLibcallImpl(RTLIB::FPTOSINT_F128_I32, RTLIB::impl__Q_qtoi);
1936 setLibcallImpl(RTLIB::FPTOUINT_F128_I32, RTLIB::impl__Q_qtou);
1937 setLibcallImpl(RTLIB::SINTTOFP_I32_F128, RTLIB::impl__Q_itoq);
1938 setLibcallImpl(RTLIB::UINTTOFP_I32_F128, RTLIB::impl__Q_utoq);
1939 setLibcallImpl(RTLIB::FPEXT_F32_F128, RTLIB::impl__Q_stoq);
1940 setLibcallImpl(RTLIB::FPEXT_F64_F128, RTLIB::impl__Q_dtoq);
1941 setLibcallImpl(RTLIB::FPROUND_F128_F32, RTLIB::impl__Q_qtos);
1942 setLibcallImpl(RTLIB::FPROUND_F128_F64, RTLIB::impl__Q_qtod);
1943 }
1944 }
1945
1946 if (Subtarget->fixAllFDIVSQRT()) {
1947 // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead, as
1948 // the former instructions are affected by errata on LEON processors.
1949 setOperationAction(ISD::FDIV, MVT::f32, Promote);
1950 setOperationAction(ISD::FSQRT, MVT::f32, Promote);
1951 }
1952
1953 if (Subtarget->hasNoFMULS()) {
1954 setOperationAction(ISD::FMUL, MVT::f32, Promote);
1955 }
1956
1957 // Custom combine bitcast between f64 and v2i32
1958 if (!Subtarget->is64Bit())
1959 setTargetDAGCombine(ISD::BITCAST);
1960
1961 if (Subtarget->hasLeonCycleCounter())
1962 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
1963
1964 if (Subtarget->isVIS3()) {
1969
1970 setOperationAction(ISD::CTTZ, MVT::i32,
1971 Subtarget->is64Bit() ? Promote : Expand);
1974 Subtarget->is64Bit() ? Promote : Expand);
1976 } else if (Subtarget->usePopc()) {
1981
1986 } else {
1990 Subtarget->is64Bit() ? Promote : LibCall);
1992
1993 // FIXME: here we don't have any ISA extensions that could help us, so to
1994 // prevent large expansions these should be made into LibCalls.
1999 }
2000
2002
2003 // Some processors have no branch predictor and have pipelines longer than
2004 // what can be covered by the delay slot. This results in a stall, so
2005 // mark branches as expensive on those processors.
2006 setJumpIsExpensive(Subtarget->hasNoPredictor());
2007 // The high cost of branching means that using conditional moves will
2008 // still be profitable even if the condition is predictable.
2009 PredictableSelectIsExpensive = Subtarget->hasNoPredictor();
2010
2012
2013 computeRegisterProperties(Subtarget->getRegisterInfo());
2014}
2015
2016 bool SparcTargetLowering::useSoftFloat() const {
2017 return Subtarget->useSoftFloat();
2018}
2019
2020 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
2021 EVT VT) const {
2022 if (!VT.isVector())
2023 return MVT::i32;
2024 return VT.changeVectorElementTypeToInteger();
2025 }
2026
2027 /// computeKnownBitsForTargetNode - Determine which bits of Op are known
2028 /// to be zero or one. Op is expected to be a target-specific node. Used by
2029 /// the DAG combiner.
2030 void SparcTargetLowering::computeKnownBitsForTargetNode
2031 (const SDValue Op,
2032 KnownBits &Known,
2033 const APInt &DemandedElts,
2034 const SelectionDAG &DAG,
2035 unsigned Depth) const {
2036 KnownBits Known2;
2037 Known.resetAll();
2038
2039 switch (Op.getOpcode()) {
2040 default: break;
2041 case SPISD::SELECT_ICC:
2042 case SPISD::SELECT_XCC:
2043 case SPISD::SELECT_FCC:
2044 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
2045 Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
2046
2047 // Only known if known in both the LHS and RHS.
2048 Known = Known.intersectWith(Known2);
2049 break;
2050 }
2051}
2052
2053 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If
2054 // so, set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
2055 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
2056 ISD::CondCode CC, unsigned &SPCC) {
2057 if (isNullConstant(RHS) && CC == ISD::SETNE &&
2058 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
2059 LHS.getOpcode() == SPISD::SELECT_XCC) &&
2060 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
2061 (LHS.getOpcode() == SPISD::SELECT_FCC &&
2062 (LHS.getOperand(3).getOpcode() == SPISD::CMPFCC ||
2063 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC_V9))) &&
2064 isOneConstant(LHS.getOperand(0)) && isNullConstant(LHS.getOperand(1))) {
2065 SDValue CMPCC = LHS.getOperand(3);
2066 SPCC = LHS.getConstantOperandVal(2);
2067 LHS = CMPCC.getOperand(0);
2068 RHS = CMPCC.getOperand(1);
2069 }
2070}
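// Illustrative DAG shape (an assumed example, not lifted from a test):
// a boolean produced by a lowered setcc typically looks like
//   (SELECT_ICC 1, 0, cc, (CMPICC %a, %b))
// compared against zero with SETNE. LookThroughSetCC peels the select
// away so later code branches or selects directly on (CMPICC %a, %b)
// with condition cc instead of re-comparing the materialized 0/1 value.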
2071
2072// Convert to a target node and set target flags.
2073 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
2074 SelectionDAG &DAG) const {
2075 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
2076 return DAG.getTargetGlobalAddress(GA->getGlobal(),
2077 SDLoc(GA),
2078 GA->getValueType(0),
2079 GA->getOffset(), TF);
2080
2081 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
2082 return DAG.getTargetConstantPool(CP->getConstVal(), CP->getValueType(0),
2083 CP->getAlign(), CP->getOffset(), TF);
2084
2085 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
2086 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
2087 Op.getValueType(),
2088 0,
2089 TF);
2090
2091 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
2092 return DAG.getTargetExternalSymbol(ES->getSymbol(),
2093 ES->getValueType(0), TF);
2094
2095 llvm_unreachable("Unhandled address SDNode");
2096}
2097
2098// Split Op into high and low parts according to HiTF and LoTF.
2099// Return an ADD node combining the parts.
2100 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
2101 unsigned HiTF, unsigned LoTF,
2102 SelectionDAG &DAG) const {
2103 SDLoc DL(Op);
2104 EVT VT = Op.getValueType();
2105 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
2106 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
2107 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2108}
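// For example (a sketch; the register choice is illustrative), with
// HiTF/LoTF = R_SPARC_HI22/R_SPARC_LO10 the two nodes correspond to the
// classic pair
//   sethi %hi(sym), %o0        ! upper 22 bits
//   or    %o0, %lo(sym), %o0   ! lower 10 bits
// whose ADD materializes the full 32-bit absolute address.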
2109
2110// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
2111// or ExternalSymbol SDNode.
2112 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
2113 SDLoc DL(Op);
2114 EVT VT = getPointerTy(DAG.getDataLayout());
2115
2116 // Handle PIC mode first. SPARC needs a got load for every variable!
2117 if (isPositionIndependent()) {
2118 const Module *M = DAG.getMachineFunction().getFunction().getParent();
2119 PICLevel::Level picLevel = M->getPICLevel();
2120 SDValue Idx;
2121
2122 if (picLevel == PICLevel::SmallPIC) {
2123 // This is the pic13 code model, the GOT is known to be smaller than 8KiB.
2124 Idx = DAG.getNode(SPISD::Lo, DL, Op.getValueType(),
2125 withTargetFlags(Op, ELF::R_SPARC_GOT13, DAG));
2126 } else {
2127 // This is the pic32 code model, the GOT is known to be smaller than 4GB.
2128 Idx = makeHiLoPair(Op, ELF::R_SPARC_GOT22, ELF::R_SPARC_GOT10, DAG);
2129 }
2130
2131 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
2132 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, Idx);
2133 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2134 // function has calls.
2135 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2136 MFI.setHasCalls(true);
2137 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
2138 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2139 }
2140
2141 // This is one of the absolute code models.
2142 switch(getTargetMachine().getCodeModel()) {
2143 default:
2144 llvm_unreachable("Unsupported absolute code model");
2145 case CodeModel::Small:
2146 // abs32.
2147 return makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2148 case CodeModel::Medium: {
2149 // abs44.
2150 SDValue H44 = makeHiLoPair(Op, ELF::R_SPARC_H44, ELF::R_SPARC_M44, DAG);
2151 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
2152 SDValue L44 = withTargetFlags(Op, ELF::R_SPARC_L44, DAG);
2153 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
2154 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
2155 }
2156 case CodeModel::Large: {
2157 // abs64.
2158 SDValue Hi = makeHiLoPair(Op, ELF::R_SPARC_HH22, ELF::R_SPARC_HM10, DAG);
2159 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
2160 SDValue Lo = makeHiLoPair(Op, ELF::R_SPARC_HI22, ELF::R_SPARC_LO10, DAG);
2161 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
2162 }
2163 }
2164}
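// As a concrete sketch of the abs44 (CodeModel::Medium) case above, the
// generated code has roughly this shape (registers illustrative):
//   sethi %h44(sym), %o0       ! bits 43-22
//   or    %o0, %m44(sym), %o0  ! bits 21-12
//   sllx  %o0, 12, %o0         ! shift into position
//   or    %o0, %l44(sym), %o0  ! bits 11-0
// mirroring the SHL-by-12 and SPISD::Lo nodes built for that code model.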
2165
2166 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
2167 SelectionDAG &DAG) const {
2168 return makeAddress(Op, DAG);
2169 }
2170
2171 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
2172 SelectionDAG &DAG) const {
2173 return makeAddress(Op, DAG);
2174 }
2175
2176 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
2177 SelectionDAG &DAG) const {
2178 return makeAddress(Op, DAG);
2179 }
2180
2181 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
2182 SelectionDAG &DAG) const {
2183
2184 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2185 if (DAG.getTarget().useEmulatedTLS())
2186 return LowerToTLSEmulatedModel(GA, DAG);
2187
2188 SDLoc DL(GA);
2189 const GlobalValue *GV = GA->getGlobal();
2190 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2191
2192 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2193
2194 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2195 unsigned HiTF =
2196 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_HI22
2197 : ELF::R_SPARC_TLS_LDM_HI22);
2198 unsigned LoTF =
2199 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_LO10
2200 : ELF::R_SPARC_TLS_LDM_LO10);
2201 unsigned addTF =
2202 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_ADD
2203 : ELF::R_SPARC_TLS_LDM_ADD);
2204 unsigned callTF =
2205 ((model == TLSModel::GeneralDynamic) ? ELF::R_SPARC_TLS_GD_CALL
2206 : ELF::R_SPARC_TLS_LDM_CALL);
2207
2208 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
2209 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2210 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
2211 withTargetFlags(Op, addTF, DAG));
2212
2213 SDValue Chain = DAG.getEntryNode();
2214 SDValue InGlue;
2215
2216 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
2217 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InGlue);
2218 InGlue = Chain.getValue(1);
2219 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2220 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2221
2222 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2223 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2225 assert(Mask && "Missing call preserved mask for calling convention");
2226 SDValue Ops[] = {Chain,
2227 Callee,
2228 Symbol,
2229 DAG.getRegister(SP::O0, PtrVT),
2230 DAG.getRegisterMask(Mask),
2231 InGlue};
2232 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2233 InGlue = Chain.getValue(1);
2234 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
2235 InGlue = Chain.getValue(1);
2236 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InGlue);
2237
2238 if (model != TLSModel::LocalDynamic)
2239 return Ret;
2240
2241 SDValue Hi =
2242 DAG.getNode(SPISD::Hi, DL, PtrVT,
2243 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_HIX22, DAG));
2244 SDValue Lo =
2245 DAG.getNode(SPISD::Lo, DL, PtrVT,
2246 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_LOX10, DAG));
2247 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2248 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2249 withTargetFlags(Op, ELF::R_SPARC_TLS_LDO_ADD, DAG));
2250 }
2251
2252 if (model == TLSModel::InitialExec) {
2253 unsigned ldTF = ((PtrVT == MVT::i64) ? ELF::R_SPARC_TLS_IE_LDX
2254 : ELF::R_SPARC_TLS_IE_LD);
2255
2256 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2257
2258 // GLOBAL_BASE_REG codegen'ed with call. Inform MFI that this
2259 // function has calls.
2260 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2261 MFI.setHasCalls(true);
2262
2263 SDValue TGA = makeHiLoPair(Op, ELF::R_SPARC_TLS_IE_HI22,
2264 ELF::R_SPARC_TLS_IE_LO10, DAG);
2265 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2266 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2267 DL, PtrVT, Ptr,
2268 withTargetFlags(Op, ldTF, DAG));
2269 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2270 DAG.getRegister(SP::G7, PtrVT), Offset,
2271 withTargetFlags(Op, ELF::R_SPARC_TLS_IE_ADD, DAG));
2272 }
2273
2274 assert(model == TLSModel::LocalExec);
2275 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2276 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_HIX22, DAG));
2277 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2278 withTargetFlags(Op, ELF::R_SPARC_TLS_LE_LOX10, DAG));
2279 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2280
2281 return DAG.getNode(ISD::ADD, DL, PtrVT,
2282 DAG.getRegister(SP::G7, PtrVT), Offset);
2283}
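// For reference, the general-dynamic path above corresponds to the
// standard SPARC TLS call sequence, roughly (a hedged sketch, not copied
// from emitted output):
//   sethi %tgd_hi22(sym), %o0
//   add   %o0, %tgd_lo10(sym), %o0
//   add   %l7, %o0, %o0, %tgd_add(sym)
//   call  __tls_get_addr, %tgd_call(sym)
// with the result in %o0, which is exactly the CopyFromReg(SP::O0) above.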
2284
2285 SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
2286 ArgListTy &Args, SDValue Arg,
2287 const SDLoc &DL,
2288 SelectionDAG &DAG) const {
2289 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2290 EVT ArgVT = Arg.getValueType();
2291 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2292
2293 if (ArgTy->isFP128Ty()) {
2294 // Create a stack object and pass the pointer to the library function.
2295 int FI = MFI.CreateStackObject(16, Align(8), false);
2296 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2297 Chain = DAG.getStore(Chain, DL, Arg, FIPtr, MachinePointerInfo(), Align(8));
2298 Args.emplace_back(FIPtr, PointerType::getUnqual(ArgTy->getContext()));
2299 } else {
2300 Args.emplace_back(Arg, ArgTy);
2301 }
2302 return Chain;
2303}
2304
2305SDValue
2306 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2307 const char *LibFuncName,
2308 unsigned numArgs) const {
2309
2310 ArgListTy Args;
2311
2312 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2313 auto PtrVT = getPointerTy(DAG.getDataLayout());
2314
2315 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2316 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2317 Type *RetTyABI = RetTy;
2318 SDValue Chain = DAG.getEntryNode();
2319 SDValue RetPtr;
2320
2321 if (RetTy->isFP128Ty()) {
2322 // Create a Stack Object to receive the return value of type f128.
2323 int RetFI = MFI.CreateStackObject(16, Align(8), false);
2324 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2325 ArgListEntry Entry(RetPtr, PointerType::getUnqual(RetTy->getContext()));
2326 if (!Subtarget->is64Bit()) {
2327 Entry.IsSRet = true;
2328 Entry.IndirectType = RetTy;
2329 }
2330 Entry.IsReturned = false;
2331 Args.push_back(Entry);
2332 RetTyABI = Type::getVoidTy(*DAG.getContext());
2333 }
2334
2335 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2336 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2337 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2338 }
2339 TargetLowering::CallLoweringInfo CLI(DAG);
2340 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2341 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
2342
2343 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2344
2345 // The chain is in the second result.
2346 if (RetTyABI == RetTy)
2347 return CallInfo.first;
2348
2349 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2350
2351 Chain = CallInfo.second;
2352
2353 // Load RetPtr to get the return value.
2354 return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
2355 MachinePointerInfo());
2356 }
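// Example (editor's sketch of the convention this routine assumes): a
// call such as LowerF128Op(Op, DAG, "_Q_add", 2) passes both f128
// operands indirectly through 16-byte stack slots created by
// LowerF128_LibCallArg, and receives the f128 result through the hidden
// RetPtr argument (marked sret on 32-bit targets), which is reloaded
// above once the call completes.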
2357
2358 SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2359 unsigned &SPCC, const SDLoc &DL,
2360 SelectionDAG &DAG) const {
2361
2362 const char *LibCall = nullptr;
2363 bool is64Bit = Subtarget->is64Bit();
2364 switch(SPCC) {
2365 default: llvm_unreachable("Unhandled conditional code!");
2366 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2367 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2368 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2369 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2370 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2371 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2372 case SPCC::FCC_UL :
2373 case SPCC::FCC_ULE:
2374 case SPCC::FCC_UG :
2375 case SPCC::FCC_UGE:
2376 case SPCC::FCC_U :
2377 case SPCC::FCC_O :
2378 case SPCC::FCC_LG :
2379 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2380 }
2381
2382 auto PtrVT = getPointerTy(DAG.getDataLayout());
2383 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2384 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2385 ArgListTy Args;
2386 SDValue Chain = DAG.getEntryNode();
2387 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2388 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2389
2390 TargetLowering::CallLoweringInfo CLI(DAG);
2391 CLI.setDebugLoc(DL).setChain(Chain)
2392 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
2393
2394 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2395
2396 // The result is in the first result, and the chain is in the second.
2397 SDValue Result = CallInfo.first;
2398
2399 switch(SPCC) {
2400 default: {
2401 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2402 SPCC = SPCC::ICC_NE;
2403 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2404 }
2405 case SPCC::FCC_UL : {
2406 SDValue Mask = DAG.getConstant(1, DL, Result.getValueType());
2407 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2408 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2409 SPCC = SPCC::ICC_NE;
2410 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2411 }
2412 case SPCC::FCC_ULE: {
2413 SDValue RHS = DAG.getConstant(2, DL, Result.getValueType());
2414 SPCC = SPCC::ICC_NE;
2415 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2416 }
2417 case SPCC::FCC_UG : {
2418 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2419 SPCC = SPCC::ICC_G;
2420 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2421 }
2422 case SPCC::FCC_UGE: {
2423 SDValue RHS = DAG.getConstant(1, DL, Result.getValueType());
2424 SPCC = SPCC::ICC_NE;
2425 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2426 }
2427
2428 case SPCC::FCC_U : {
2429 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2430 SPCC = SPCC::ICC_E;
2431 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2432 }
2433 case SPCC::FCC_O : {
2434 SDValue RHS = DAG.getConstant(3, DL, Result.getValueType());
2435 SPCC = SPCC::ICC_NE;
2436 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2437 }
2438 case SPCC::FCC_LG : {
2439 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2440 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2441 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2442 SPCC = SPCC::ICC_NE;
2443 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2444 }
2445 case SPCC::FCC_UE : {
2446 SDValue Mask = DAG.getConstant(3, DL, Result.getValueType());
2447 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2448 SDValue RHS = DAG.getConstant(0, DL, Result.getValueType());
2449 SPCC = SPCC::ICC_E;
2450 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2451 }
2452 }
2453}
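// The constants above assume the conventional fcc-style result encoding
// of _Q_cmp/_Qp_cmp: 0 = equal, 1 = less, 2 = greater, 3 = unordered
// (stated here as an editorial note, not taken from the source). For
// example, FCC_UL ("unordered or less") holds exactly when the result is
// 1 or 3, i.e. when its low bit is set, which is what the AND with 1
// followed by a compare against zero under ICC_NE tests.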
2454
2455static SDValue
2456 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2457 const SparcTargetLowering &TLI) {
2458
2459 if (Op.getOperand(0).getValueType() == MVT::f64)
2460 return TLI.LowerF128Op(Op, DAG,
2461 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2462
2463 if (Op.getOperand(0).getValueType() == MVT::f32)
2464 return TLI.LowerF128Op(Op, DAG,
2465 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2466
2467 llvm_unreachable("fpextend with non-float operand!");
2468 return SDValue();
2469}
2470
2471static SDValue
2472 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2473 const SparcTargetLowering &TLI) {
2474 // FP_ROUND on f64 and f32 are legal.
2475 if (Op.getOperand(0).getValueType() != MVT::f128)
2476 return Op;
2477
2478 if (Op.getValueType() == MVT::f64)
2479 return TLI.LowerF128Op(Op, DAG,
2480 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2481 if (Op.getValueType() == MVT::f32)
2482 return TLI.LowerF128Op(Op, DAG,
2483 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2484
2485 llvm_unreachable("fpround to non-float!");
2486 return SDValue();
2487}
2488
2489 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2490 const SparcTargetLowering &TLI,
2491 bool hasHardQuad) {
2492 SDLoc dl(Op);
2493 EVT VT = Op.getValueType();
2494 assert(VT == MVT::i32 || VT == MVT::i64);
2495
2496 // Expand f128 operations to fp128 abi calls.
2497 if (Op.getOperand(0).getValueType() == MVT::f128
2498 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2499 const char *libName = TLI.getLibcallName(VT == MVT::i32
2500 ? RTLIB::FPTOSINT_F128_I32
2501 : RTLIB::FPTOSINT_F128_I64);
2502 return TLI.LowerF128Op(Op, DAG, libName, 1);
2503 }
2504
2505 // Expand if the resulting type is illegal.
2506 if (!TLI.isTypeLegal(VT))
2507 return SDValue();
2508
2509 // Otherwise, convert the fp value to an integer in an FP register.
2510 if (VT == MVT::i32)
2511 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2512 else
2513 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2514
2515 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2516}
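// Register-level sketch of the non-f128 path (instructions illustrative,
// not actual codegen output): for an i32 result,
//   fstoi %f1, %f0    ! SPISD::FTOI, integer bits left in an FP register
//   st    %f0, [slot] ! the ISD::BITCAST typically round-trips via memory
//   ld    [slot], %o0
// i.e. the conversion happens entirely in the FP register file and the
// bitcast moves the resulting bits into an integer register.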
2517
2518 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2519 const SparcTargetLowering &TLI,
2520 bool hasHardQuad) {
2521 SDLoc dl(Op);
2522 EVT OpVT = Op.getOperand(0).getValueType();
2523 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2524
2525 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2526
2527 // Expand f128 operations to fp128 ABI calls.
2528 if (Op.getValueType() == MVT::f128
2529 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2530 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2531 ? RTLIB::SINTTOFP_I32_F128
2532 : RTLIB::SINTTOFP_I64_F128);
2533 return TLI.LowerF128Op(Op, DAG, libName, 1);
2534 }
2535
2536 // Expand if the operand type is illegal.
2537 if (!TLI.isTypeLegal(OpVT))
2538 return SDValue();
2539
2540 // Otherwise, convert the int value to FP in an FP register.
2541 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2542 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2543 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2544}
2545
2546 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2547 const SparcTargetLowering &TLI,
2548 bool hasHardQuad) {
2549 EVT VT = Op.getValueType();
2550
2551 // Expand if it does not involve f128 or the target has support for
2552 // quad floating point instructions and the resulting type is legal.
2553 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2554 (hasHardQuad && TLI.isTypeLegal(VT)))
2555 return SDValue();
2556
2557 assert(VT == MVT::i32 || VT == MVT::i64);
2558
2559 return TLI.LowerF128Op(Op, DAG,
2560 TLI.getLibcallName(VT == MVT::i32
2561 ? RTLIB::FPTOUINT_F128_I32
2562 : RTLIB::FPTOUINT_F128_I64),
2563 1);
2564}
2565
2566 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2567 const SparcTargetLowering &TLI,
2568 bool hasHardQuad) {
2569 EVT OpVT = Op.getOperand(0).getValueType();
2570 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2571
2572 // Expand if it does not involve f128 or the target has support for
2573 // quad floating point instructions and the operand type is legal.
2574 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2575 return SDValue();
2576
2577 return TLI.LowerF128Op(Op, DAG,
2578 TLI.getLibcallName(OpVT == MVT::i32
2579 ? RTLIB::UINTTOFP_I32_F128
2580 : RTLIB::UINTTOFP_I64_F128),
2581 1);
2582}
2583
2584 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2585 const SparcTargetLowering &TLI, bool hasHardQuad,
2586 bool isV9, bool is64Bit) {
2587 SDValue Chain = Op.getOperand(0);
2588 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2589 SDValue LHS = Op.getOperand(2);
2590 SDValue RHS = Op.getOperand(3);
2591 SDValue Dest = Op.getOperand(4);
2592 SDLoc dl(Op);
2593 unsigned Opc, SPCC = ~0U;
2594
2595 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2596 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2598 assert(LHS.getValueType() == RHS.getValueType());
2599
2600 // Get the condition flag.
2601 SDValue CompareFlag;
2602 if (LHS.getValueType().isInteger()) {
2603 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2604 // and the RHS is zero we might be able to use a specialized branch.
2605 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2606 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC))
2607 return DAG.getNode(SPISD::BR_REG, dl, MVT::Other, Chain, Dest,
2608 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32),
2609 LHS);
2610
2611 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2612 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2613 if (isV9)
2614 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2615 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BPICC : SPISD::BPXCC;
2616 else
2617 // Non-v9 targets don't have xcc.
2618 Opc = SPISD::BRICC;
2619 } else {
2620 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2621 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2622 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2623 Opc = isV9 ? SPISD::BPICC : SPISD::BRICC;
2624 } else {
2625 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2626 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2627 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2628 Opc = isV9 ? SPISD::BRFCC_V9 : SPISD::BRFCC;
2629 }
2630 }
2631 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2632 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2633}
2634
2635 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2636 const SparcTargetLowering &TLI, bool hasHardQuad,
2637 bool isV9, bool is64Bit) {
2638 SDValue LHS = Op.getOperand(0);
2639 SDValue RHS = Op.getOperand(1);
2640 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2641 SDValue TrueVal = Op.getOperand(2);
2642 SDValue FalseVal = Op.getOperand(3);
2643 SDLoc dl(Op);
2644 unsigned Opc, SPCC = ~0U;
2645
2646 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2647 // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2649 assert(LHS.getValueType() == RHS.getValueType());
2650
2651 SDValue CompareFlag;
2652 if (LHS.getValueType().isInteger()) {
2653 // On V9 processors running in 64-bit mode, if CC compares two `i64`s
2654 // and the RHS is zero we might be able to use a specialized select.
2655 // All SELECT_CC between any two scalar integer types are eligible for
2656 // lowering to specialized instructions. Additionally, f32 and f64 types
2657 // are also eligible, but for f128 we can only use the specialized
2658 // instruction when we have hardquad.
2659 EVT ValType = TrueVal.getValueType();
2660 bool IsEligibleType = ValType.isScalarInteger() || ValType == MVT::f32 ||
2661 ValType == MVT::f64 ||
2662 (ValType == MVT::f128 && hasHardQuad);
2663 if (is64Bit && isV9 && LHS.getValueType() == MVT::i64 &&
2664 isNullConstant(RHS) && !ISD::isUnsignedIntSetCC(CC) && IsEligibleType)
2665 return DAG.getNode(
2666 SPISD::SELECT_REG, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2667 DAG.getConstant(intCondCCodeToRcond(CC), dl, MVT::i32), LHS);
2668
2669 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2670 Opc = LHS.getValueType() == MVT::i32 ?
2671 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2672 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2673 } else {
2674 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2675 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2676 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2677 Opc = SPISD::SELECT_ICC;
2678 } else {
2679 unsigned CmpOpc = isV9 ? SPISD::CMPFCC_V9 : SPISD::CMPFCC;
2680 CompareFlag = DAG.getNode(CmpOpc, dl, MVT::Glue, LHS, RHS);
2681 Opc = SPISD::SELECT_FCC;
2682 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2683 }
2684 }
2685 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2686 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2687}
2688
2689 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2690 const SparcTargetLowering &TLI) {
2691 MachineFunction &MF = DAG.getMachineFunction();
2692 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2693 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2694
2695 // Need the frame address to find the address of VarArgsFrameIndex.
2696 MF.getFrameInfo().setFrameAddressIsTaken(true);
2697
2698 // vastart just stores the address of the VarArgsFrameIndex slot into the
2699 // memory location argument.
2700 SDLoc DL(Op);
2701 SDValue Offset =
2702 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2703 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2704 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2705 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2706 MachinePointerInfo(SV));
2707}
2708
2709 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2710 SDNode *Node = Op.getNode();
2711 EVT VT = Node->getValueType(0);
2712 SDValue InChain = Node->getOperand(0);
2713 SDValue VAListPtr = Node->getOperand(1);
2714 EVT PtrVT = VAListPtr.getValueType();
2715 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2716 SDLoc DL(Node);
2717 SDValue VAList =
2718 DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
2719 // Increment the pointer, VAList, to the next vaarg.
2720 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2721 DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
2722 DL));
2723 // Store the incremented VAList to the legalized pointer.
2724 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
2725 MachinePointerInfo(SV));
2726 // Load the actual argument out of the pointer VAList.
2727 // We can't count on greater alignment than the word size.
2728 return DAG.getLoad(
2729 VT, DL, InChain, VAList, MachinePointerInfo(),
2730 Align(std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8));
2731}
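// Usage-level example (a sketch): for va_arg(ap, double) on 32-bit SPARC
// this emits a load of the current list pointer, an ADD of 8 to step past
// the argument, a store of the bumped pointer, and a final load of the
// value with only 4-byte alignment, since the caller may have stored the
// double to two word-aligned stack slots.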
2732
2733 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2734 const SparcSubtarget *Subtarget) {
2735 SDValue Chain = Op.getOperand(0);
2736 SDValue Size = Op.getOperand(1);
2737 SDValue Alignment = Op.getOperand(2);
2738 MaybeAlign MaybeAlignment =
2739 cast<ConstantSDNode>(Alignment)->getMaybeAlignValue();
2740 EVT VT = Size->getValueType(0);
2741 SDLoc dl(Op);
2742
2743 unsigned SPReg = SP::O6;
2744 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2745
2746 // The resultant pointer needs to be above the register spill area
2747 // at the bottom of the stack.
2748 unsigned regSpillArea;
2749 if (Subtarget->is64Bit()) {
2750 regSpillArea = 128;
2751 } else {
2752 // On Sparc32, the size of the spill area is 92. Unfortunately,
2753 // that's only 4-byte aligned, not 8-byte aligned (the stack
2754 // pointer is 8-byte aligned). So, if the user asked for an 8-byte
2755 // aligned dynamic allocation, we actually need to add 96 to the
2756 // bottom of the stack, instead of 92, to ensure 8-byte alignment.
2757
2758 // That also means adding 4 to the size of the allocation --
2759 // before applying the 8-byte rounding. Unfortunately, the value
2760 // we get here has already had rounding applied. So, we need
2761 // to add 8 instead, wasting a bit more memory.
2762
2763 // Further, this only actually needs to be done if the required
2764 // alignment is > 4, but we've lost that info by this point, so we
2765 // always apply it.
2766
2767 // (An alternative approach would be to always reserve 96 bytes
2768 // instead of the required 92, but then we'd waste 4 extra bytes
2769 // in every frame, not just those with dynamic stack allocations)
2770
2771 // TODO: modify code in SelectionDAGBuilder to make this less sad.
2772
2773 Size = DAG.getNode(ISD::ADD, dl, VT, Size,
2774 DAG.getConstant(8, dl, VT));
2775 regSpillArea = 96;
2776 }
2777
2778 int64_t Bias = Subtarget->getStackPointerBias();
2779
2780 // Debias and increment SP past the reserved spill area.
2781 // We need the SP to point to the first usable region before calculating
2782 // anything to prevent any of the pointers from becoming out of alignment when
2783 // we rebias the SP later on.
2784 SDValue StartOfUsableStack = DAG.getNode(
2785 ISD::ADD, dl, VT, SP, DAG.getConstant(regSpillArea + Bias, dl, VT));
2786 SDValue AllocatedPtr =
2787 DAG.getNode(ISD::SUB, dl, VT, StartOfUsableStack, Size);
2788
2789 bool IsOveraligned = MaybeAlignment.has_value();
2790 SDValue AlignedPtr =
2791 IsOveraligned
2792 ? DAG.getNode(ISD::AND, dl, VT, AllocatedPtr,
2793 DAG.getSignedConstant(-MaybeAlignment->value(), dl, VT))
2794 : AllocatedPtr;
2795
2796 // Now that we are done, restore the bias and reserved spill area.
2797 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, AlignedPtr,
2798 DAG.getConstant(regSpillArea + Bias, dl, VT));
2799 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);
2800 SDValue Ops[2] = {AlignedPtr, Chain};
2801 return DAG.getMergeValues(Ops, dl);
2802}
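// Worked example of the arithmetic above (numbers only; editor's sketch):
// on Sparc32, an 8-byte-aligned alloca of 16 bytes proceeds as
//   Size    = 16 + 8 = 24         (rounding slack, see comment above)
//   Start   = %sp + 96 + 0        (regSpillArea = 96, bias = 0)
//   Ptr     = (Start - 24) & ~7   (align the allocation)
//   new %sp = Ptr - 96            (re-reserve bias and spill area)
// On Sparc64 the same flow uses regSpillArea = 128 and bias = 2047.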
2803
2804
2805 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2806 SDLoc dl(Op);
2807 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2808 dl, MVT::Other, DAG.getEntryNode());
2809 return Chain;
2810}
2811
2812 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2813 const SparcSubtarget *Subtarget,
2814 bool AlwaysFlush = false) {
2815 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2816 MFI.setFrameAddressIsTaken(true);
2817
2818 EVT VT = Op.getValueType();
2819 SDLoc dl(Op);
2820 unsigned FrameReg = SP::I6;
2821 unsigned stackBias = Subtarget->getStackPointerBias();
2822
2823 SDValue FrameAddr;
2824 SDValue Chain;
2825
2826 // Flush first to make sure the windowed registers' values are on the stack.
2827 Chain = (depth || AlwaysFlush) ? getFLUSHW(Op, DAG) : DAG.getEntryNode();
2828
2829 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2830
2831 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
2832
2833 while (depth--) {
2834 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2835 DAG.getIntPtrConstant(Offset, dl));
2836 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
2837 }
2838 if (Subtarget->is64Bit())
2839 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2840 DAG.getIntPtrConstant(stackBias, dl));
2841 return FrameAddr;
2842}
2843
2844
2845 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2846 const SparcSubtarget *Subtarget) {
2847
2848 uint64_t depth = Op.getConstantOperandVal(0);
2849
2850 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2851
2852}
2853
2854 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2855 const SparcTargetLowering &TLI,
2856 const SparcSubtarget *Subtarget) {
2857 MachineFunction &MF = DAG.getMachineFunction();
2858 MachineFrameInfo &MFI = MF.getFrameInfo();
2859 MFI.setReturnAddressIsTaken(true);
2860
2861 EVT VT = Op.getValueType();
2862 SDLoc dl(Op);
2863 uint64_t depth = Op.getConstantOperandVal(0);
2864
2865 SDValue RetAddr;
2866 if (depth == 0) {
2867 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2868 Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2869 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2870 return RetAddr;
2871 }
2872
2873 // Need frame address to find return address of the caller.
2874 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget, true);
2875
2876 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
2877 SDValue Ptr = DAG.getNode(ISD::ADD,
2878 dl, VT,
2879 FrameAddr,
2880 DAG.getIntPtrConstant(Offset, dl));
2881 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2882
2883 return RetAddr;
2884}
2885
2886static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
2887 unsigned opcode) {
2888 assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2889 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2890
2891 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2892 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2893 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2894
2895 // Note: in little-endian, the two halves of the floating-point value
2896 // are stored in the registers in the opposite order, so the subreg
2897 // with the sign bit is the highest-numbered (odd), rather than the
2898 // lowest-numbered (even).
2899
2900 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2901 SrcReg64);
2902 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2903 SrcReg64);
2904
2905 if (DAG.getDataLayout().isLittleEndian())
2906 Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
2907 else
2908 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2909
2910 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2911 dl, MVT::f64), 0);
2912 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2913 DstReg64, Hi32);
2914 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2915 DstReg64, Lo32);
2916 return DstReg64;
2917}
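// For instance (illustrative registers), fneg on an f64 in %f0:%f1 on a
// pre-V9 target becomes
//   fnegs %f0, %f2   ! flip the sign in the half holding the sign bit
//   fmovs %f1, %f3   ! copy the other half unchanged
// with the two halves reassembled into a double by the INSERT_SUBREGs
// built above.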
2918
2919// Lower a f128 load into two f64 loads.
2920 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2921 {
2922 SDLoc dl(Op);
2923 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2924 assert(LdNode->getOffset().isUndef() && "Unexpected node type");
2925
2926 Align Alignment = commonAlignment(LdNode->getBaseAlign(), 8);
2927
2928 SDValue Hi64 =
2929 DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
2930 LdNode->getPointerInfo(), Alignment);
2931 EVT addrVT = LdNode->getBasePtr().getValueType();
2932 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2933 LdNode->getBasePtr(),
2934 DAG.getConstant(8, dl, addrVT));
2935 SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
2936 LdNode->getPointerInfo().getWithOffset(8),
2937 Alignment);
2938
2939 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2940 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2941
2942 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2943 dl, MVT::f128);
2944 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2945 MVT::f128,
2946 SDValue(InFP128, 0),
2947 Hi64,
2948 SubRegEven);
2949 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2950 MVT::f128,
2951 SDValue(InFP128, 0),
2952 Lo64,
2953 SubRegOdd);
2954 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2955 SDValue(Lo64.getNode(), 1) };
2956 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2957 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2958 return DAG.getMergeValues(Ops, dl);
2959}
2960
2961 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2962 {
2963 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2964
2965 EVT MemVT = LdNode->getMemoryVT();
2966 if (MemVT == MVT::f128)
2967 return LowerF128Load(Op, DAG);
2968
2969 return Op;
2970}
2971
2972// Lower a f128 store into two f64 stores.
2973 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2974 SDLoc dl(Op);
2975 StoreSDNode *StNode = cast<StoreSDNode>(Op.getNode());
2976 assert(StNode->getOffset().isUndef() && "Unexpected node type");
2977
2978 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2979 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2980
2981 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2982 dl,
2983 MVT::f64,
2984 StNode->getValue(),
2985 SubRegEven);
2986 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2987 dl,
2988 MVT::f64,
2989 StNode->getValue(),
2990 SubRegOdd);
2991
2992 Align Alignment = commonAlignment(StNode->getBaseAlign(), 8);
2993
2994 SDValue OutChains[2];
2995 OutChains[0] =
2996 DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
2997 StNode->getBasePtr(), StNode->getPointerInfo(),
2998 Alignment);
2999 EVT addrVT = StNode->getBasePtr().getValueType();
3000 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
3001 StNode->getBasePtr(),
3002 DAG.getConstant(8, dl, addrVT));
3003 OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
3004 StNode->getPointerInfo().getWithOffset(8),
3005 Alignment);
3006 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
3007}
3008
3009 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
3010 {
3011 SDLoc dl(Op);
3012 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
3013
3014 EVT MemVT = St->getMemoryVT();
3015 if (MemVT == MVT::f128)
3016 return LowerF128Store(Op, DAG);
3017
3018 if (MemVT == MVT::i64) {
3019 // Custom handling for i64 stores: turn them into a bitcast and a
3020 // v2i32 store.
3021 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
3022 SDValue Chain = DAG.getStore(
3023 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
3024 St->getBaseAlign(), St->getMemOperand()->getFlags(), St->getAAInfo());
3025 return Chain;
3026 }
3027
3028 return SDValue();
3029}
3030
3031 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
3032 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
3033 && "invalid opcode");
3034
3035 SDLoc dl(Op);
3036
3037 if (Op.getValueType() == MVT::f64)
3038 return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
3039 if (Op.getValueType() != MVT::f128)
3040 return Op;
3041
3042 // Lower fabs/fneg on f128 to fabs/fneg on f64
3043 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
3044 // (As with LowerF64Op, on little-endian, we need to negate the odd
3045 // subreg)
3046
3047 SDValue SrcReg128 = Op.getOperand(0);
3048 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
3049 SrcReg128);
3050 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
3051 SrcReg128);
3052
3053 if (DAG.getDataLayout().isLittleEndian()) {
3054 if (isV9)
3055 Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
3056 else
3057 Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
3058 } else {
3059 if (isV9)
3060 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
3061 else
3062 Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
3063 }
3064
3065 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
3066 dl, MVT::f128), 0);
3067 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
3068 DstReg128, Hi64);
3069 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
3070 DstReg128, Lo64);
3071 return DstReg128;
3072}
3073
3074 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
3075 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getSuccessOrdering())) {
3076 // Expand with a fence.
3077 return SDValue();
3078 }
3079
3080 // Monotonic load/stores are legal.
3081 return Op;
3082}
3083
3084 SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3085 SelectionDAG &DAG) const {
3086 unsigned IntNo = Op.getConstantOperandVal(0);
3087 switch (IntNo) {
3088 default: return SDValue(); // Don't custom lower most intrinsics.
3089 case Intrinsic::thread_pointer: {
3090 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3091 return DAG.getRegister(SP::G7, PtrVT);
3092 }
3093 }
3094}
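// Example (a sketch): IR such as
//   %tp = call ptr @llvm.thread.pointer()
// lowers to a plain read of %g7, the register the SPARC ABI reserves for
// the thread pointer, so no instructions are needed beyond the register
// use itself.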
3095
3096 SDValue SparcTargetLowering::
3097 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3098
3099 bool hasHardQuad = Subtarget->hasHardQuad();
3100 bool isV9 = Subtarget->isV9();
3101 bool is64Bit = Subtarget->is64Bit();
3102
3103 switch (Op.getOpcode()) {
3104 default: llvm_unreachable("Should not custom lower this!");
3105
3106 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
3107 Subtarget);
3108 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
3109 Subtarget);
3110 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
3111 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
3112 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3113 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3114 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
3115 hasHardQuad);
3116 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
3117 hasHardQuad);
3118 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
3119 hasHardQuad);
3120 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
3121 hasHardQuad);
3122 case ISD::BR_CC:
3123 return LowerBR_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3124 case ISD::SELECT_CC:
3125 return LowerSELECT_CC(Op, DAG, *this, hasHardQuad, isV9, is64Bit);
3126 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
3127 case ISD::VAARG: return LowerVAARG(Op, DAG);
3128 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
3129 Subtarget);
3130
3131 case ISD::LOAD: return LowerLOAD(Op, DAG);
3132 case ISD::STORE: return LowerSTORE(Op, DAG);
3133 case ISD::FADD: return LowerF128Op(Op, DAG,
3134 getLibcallName(RTLIB::ADD_F128), 2);
3135 case ISD::FSUB: return LowerF128Op(Op, DAG,
3136 getLibcallName(RTLIB::SUB_F128), 2);
3137 case ISD::FMUL: return LowerF128Op(Op, DAG,
3138 getLibcallName(RTLIB::MUL_F128), 2);
3139 case ISD::FDIV: return LowerF128Op(Op, DAG,
3140 getLibcallName(RTLIB::DIV_F128), 2);
3141 case ISD::FSQRT: return LowerF128Op(Op, DAG,
3142 getLibcallName(RTLIB::SQRT_F128),1);
3143 case ISD::FABS:
3144 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
3145 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
3146 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
3147 case ISD::ATOMIC_LOAD:
3148 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
3149 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3150 }
3151}
3152
3153 SDValue SparcTargetLowering::bitcastConstantFPToInt(ConstantFPSDNode *C,
3154 const SDLoc &DL,
3155 SelectionDAG &DAG) const {
3156 APInt V = C->getValueAPF().bitcastToAPInt();
3157 SDValue Lo = DAG.getConstant(V.zextOrTrunc(32), DL, MVT::i32);
3158 SDValue Hi = DAG.getConstant(V.lshr(32).zextOrTrunc(32), DL, MVT::i32);
3159 if (DAG.getDataLayout().isLittleEndian())
3160 std::swap(Lo, Hi);
3161 return DAG.getBuildVector(MVT::v2i32, DL, {Hi, Lo});
3162}
3163
3164 SDValue SparcTargetLowering::PerformBITCASTCombine(SDNode *N,
3165 DAGCombinerInfo &DCI) const {
3166 SDLoc dl(N);
3167 SDValue Src = N->getOperand(0);
3168
3169 if (isa<ConstantFPSDNode>(Src) && N->getSimpleValueType(0) == MVT::v2i32 &&
3170 Src.getSimpleValueType() == MVT::f64)
3171 return bitcastConstantFPToInt(cast<ConstantFPSDNode>(Src), dl, DCI.DAG);
3172
3173 return SDValue();
3174}
3175
3176 SDValue SparcTargetLowering::PerformDAGCombine(SDNode *N,
3177 DAGCombinerInfo &DCI) const {
3178 switch (N->getOpcode()) {
3179 default:
3180 break;
3181 case ISD::BITCAST:
3182 return PerformBITCASTCombine(N, DCI);
3183 }
3184 return SDValue();
3185}
3186
3187 MachineBasicBlock *
3188 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
3189 MachineBasicBlock *BB) const {
3190 switch (MI.getOpcode()) {
3191 default: llvm_unreachable("Unknown SELECT_CC!");
3192 case SP::SELECT_CC_Int_ICC:
3193 case SP::SELECT_CC_FP_ICC:
3194 case SP::SELECT_CC_DFP_ICC:
3195 case SP::SELECT_CC_QFP_ICC:
3196 if (Subtarget->isV9())
3197 return expandSelectCC(MI, BB, SP::BPICC);
3198 return expandSelectCC(MI, BB, SP::BCOND);
3199 case SP::SELECT_CC_Int_XCC:
3200 case SP::SELECT_CC_FP_XCC:
3201 case SP::SELECT_CC_DFP_XCC:
3202 case SP::SELECT_CC_QFP_XCC:
3203 return expandSelectCC(MI, BB, SP::BPXCC);
3204 case SP::SELECT_CC_Int_FCC:
3205 case SP::SELECT_CC_FP_FCC:
3206 case SP::SELECT_CC_DFP_FCC:
3207 case SP::SELECT_CC_QFP_FCC:
3208 if (Subtarget->isV9())
3209 return expandSelectCC(MI, BB, SP::FBCOND_V9);
3210 return expandSelectCC(MI, BB, SP::FBCOND);
3211 }
3212}
3213
3214 MachineBasicBlock *
3215 SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
3216 unsigned BROpcode) const {
3217 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3218 DebugLoc dl = MI.getDebugLoc();
3219 unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
3220
3221 // To "insert" a SELECT_CC instruction, we actually have to insert the
3222 // triangle control-flow pattern. The incoming instruction knows the
3223 // destination vreg to set, the condition code register to branch on, the
3224 // true/false values to select between, and the condition code for the branch.
3225 //
3226 // We produce the following control flow:
3227 // ThisMBB
3228 // | \
3229 // | IfFalseMBB
3230 // | /
3231 // SinkMBB
3232 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3233 MachineFunction::iterator It = ++BB->getIterator();
3234
3235 MachineBasicBlock *ThisMBB = BB;
3236 MachineFunction *F = BB->getParent();
3237 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
3238 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3239 F->insert(It, IfFalseMBB);
3240 F->insert(It, SinkMBB);
3241
3242 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
3243 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
3244 std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
3245 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
3246
3247 // Set the new successors for ThisMBB.
3248 ThisMBB->addSuccessor(IfFalseMBB);
3249 ThisMBB->addSuccessor(SinkMBB);
3250
3251 BuildMI(ThisMBB, dl, TII.get(BROpcode))
3252 .addMBB(SinkMBB)
3253 .addImm(CC);
3254
3255 // IfFalseMBB just falls through to SinkMBB.
3256 IfFalseMBB->addSuccessor(SinkMBB);
3257
3258 // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
3259 BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
3260 MI.getOperand(0).getReg())
3261 .addReg(MI.getOperand(1).getReg())
3262 .addMBB(ThisMBB)
3263 .addReg(MI.getOperand(2).getReg())
3264 .addMBB(IfFalseMBB);
3265
3266 MI.eraseFromParent(); // The pseudo instruction is gone now.
3267 return SinkMBB;
3268}
3269
3270//===----------------------------------------------------------------------===//
3271// Sparc Inline Assembly Support
3272//===----------------------------------------------------------------------===//
3273
3274/// getConstraintType - Given a constraint letter, return the type of
3275/// constraint it is for this target.
3276 SparcTargetLowering::ConstraintType
3277 SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3278 if (Constraint.size() == 1) {
3279 switch (Constraint[0]) {
3280 default: break;
3281 case 'r':
3282 case 'f':
3283 case 'e':
3284 return C_RegisterClass;
3285 case 'I': // SIMM13
3286 return C_Immediate;
3287 }
3288 }
3289
3290 return TargetLowering::getConstraintType(Constraint);
3291}
3292
3293 TargetLowering::ConstraintWeight SparcTargetLowering::
3294 getSingleConstraintMatchWeight(AsmOperandInfo &info,
3295 const char *constraint) const {
3296 ConstraintWeight weight = CW_Invalid;
3297 Value *CallOperandVal = info.CallOperandVal;
3298 // If we don't have a value, we can't do a match,
3299 // but allow it at the lowest weight.
3300 if (!CallOperandVal)
3301 return CW_Default;
3302
3303 // Look at the constraint type.
3304 switch (*constraint) {
3305 default:
3306 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3307 break;
3308 case 'I': // SIMM13
3309 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3310 if (isInt<13>(C->getSExtValue()))
3311 weight = CW_Constant;
3312 }
3313 break;
3314 }
3315 return weight;
3316}
3317
3318/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3319/// vector. If it is invalid, don't add anything to Ops.
3320 void SparcTargetLowering::LowerAsmOperandForConstraint(
3321 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3322 SelectionDAG &DAG) const {
3323 SDValue Result;
3324
3325 // Only support length 1 constraints for now.
3326 if (Constraint.size() > 1)
3327 return;
3328
3329 char ConstraintLetter = Constraint[0];
3330 switch (ConstraintLetter) {
3331 default: break;
3332 case 'I':
3333 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3334 if (isInt<13>(C->getSExtValue())) {
3335 Result = DAG.getSignedTargetConstant(C->getSExtValue(), SDLoc(Op),
3336 Op.getValueType());
3337 break;
3338 }
3339 return;
3340 }
3341 }
3342
3343 if (Result.getNode()) {
3344 Ops.push_back(Result);
3345 return;
3346 }
3347 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3348 }
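// Usage example for the 'I' constraint (hypothetical user code, not from
// the source):
//   asm("add %1, %2, %0" : "=r"(res) : "r"(a), "I"(42));
// The literal 42 fits in a signed 13-bit immediate, so it is accepted and
// folded into the instruction, whereas a value like 5000 exceeds simm13
// and falls through to the generic handling above.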
3349
3350std::pair<unsigned, const TargetRegisterClass *>
3351 SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3352 StringRef Constraint,
3353 MVT VT) const {
3354 if (Constraint.empty())
3355 return std::make_pair(0U, nullptr);
3356
3357 if (Constraint.size() == 1) {
3358 switch (Constraint[0]) {
3359 case 'r':
3360 if (VT == MVT::v2i32)
3361 return std::make_pair(0U, &SP::IntPairRegClass);
3362 else if (Subtarget->is64Bit())
3363 return std::make_pair(0U, &SP::I64RegsRegClass);
3364 else
3365 return std::make_pair(0U, &SP::IntRegsRegClass);
3366 case 'f':
3367 if (VT == MVT::f32 || VT == MVT::i32)
3368 return std::make_pair(0U, &SP::FPRegsRegClass);
3369 else if (VT == MVT::f64 || VT == MVT::i64)
3370 return std::make_pair(0U, &SP::LowDFPRegsRegClass);
3371 else if (VT == MVT::f128)
3372 return std::make_pair(0U, &SP::LowQFPRegsRegClass);
3373 // This will generate an error message
3374 return std::make_pair(0U, nullptr);
3375 case 'e':
3376 if (VT == MVT::f32 || VT == MVT::i32)
3377 return std::make_pair(0U, &SP::FPRegsRegClass);
3378 else if (VT == MVT::f64 || VT == MVT::i64 )
3379 return std::make_pair(0U, &SP::DFPRegsRegClass);
3380 else if (VT == MVT::f128)
3381 return std::make_pair(0U, &SP::QFPRegsRegClass);
3382 // This will generate an error message
3383 return std::make_pair(0U, nullptr);
3384 }
3385 }
3386
3387 if (Constraint.front() != '{')
3388 return std::make_pair(0U, nullptr);
3389
3390 assert(Constraint.back() == '}' && "Not a brace enclosed constraint?");
3391 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
3392 if (RegName.empty())
3393 return std::make_pair(0U, nullptr);
3394
3395 unsigned long long RegNo;
3396 // Handle numbered register aliases.
3397 if (RegName[0] == 'r' &&
3398 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3399 // r0-r7 -> g0-g7
3400 // r8-r15 -> o0-o7
3401 // r16-r23 -> l0-l7
3402 // r24-r31 -> i0-i7
3403 if (RegNo > 31)
3404 return std::make_pair(0U, nullptr);
3405 const char RegTypes[] = {'g', 'o', 'l', 'i'};
3406 char RegType = RegTypes[RegNo / 8];
3407 char RegIndex = '0' + (RegNo % 8);
3408 char Tmp[] = {'{', RegType, RegIndex, '}', 0};
3409 return getRegForInlineAsmConstraint(TRI, Tmp, VT);
3410 }
3411
3412 // Rewrite the fN constraint according to the value type if needed.
3413 if (VT != MVT::f32 && VT != MVT::Other && RegName[0] == 'f' &&
3414 getAsUnsignedInteger(RegName.begin() + 1, 10, RegNo)) {
3415 if (VT == MVT::f64 && (RegNo % 2 == 0)) {
3416 return getRegForInlineAsmConstraint(
3417 TRI, StringRef("{d" + utostr(RegNo / 2) + "}"), VT);
3418 } else if (VT == MVT::f128 && (RegNo % 4 == 0)) {
3419 return getRegForInlineAsmConstraint(
3420 TRI, StringRef("{q" + utostr(RegNo / 4) + "}"), VT);
3421 } else {
3422 return std::make_pair(0U, nullptr);
3423 }
3424 }
3425
3426 auto ResultPair =
3427 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3428 if (!ResultPair.second)
3429 return std::make_pair(0U, nullptr);
3430
3431 // Force the use of I64Regs over IntRegs for 64-bit values.
3432 if (Subtarget->is64Bit() && VT == MVT::i64) {
3433 assert(ResultPair.second == &SP::IntRegsRegClass &&
3434 "Unexpected register class");
3435 return std::make_pair(ResultPair.first, &SP::I64RegsRegClass);
3436 }
3437
3438 return ResultPair;
3439}
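// Worked examples of the alias rewriting above (illustrative, not upstream):
//
//   "{r10}" with i32  -> retried as "{o2}"  (r8-r15 map onto o0-o7)
//   "{f2}"  with f64  -> retried as "{d1}"  (even fN pairs alias dN)
//   "{f4}"  with f128 -> retried as "{q1}"  (fN with N % 4 == 0 aliases qN)
//   "{f3}"  with f64  -> rejected, since an odd fN cannot start a double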
3440
3441bool
3442SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3443 // The Sparc target isn't yet aware of offsets.
3444 return false;
3445}
3446
3447void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3448 SmallVectorImpl<SDValue> &Results,
3449 SelectionDAG &DAG) const {
3450
3451 SDLoc dl(N);
3452
3453 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3454
3455 switch (N->getOpcode()) {
3456 default:
3457 llvm_unreachable("Do not know how to custom type legalize this operation!");
3458
3459 case ISD::FP_TO_SINT:
3460 case ISD::FP_TO_UINT:
3461 // Custom lower only if it involves f128 or i64.
3462 if (N->getOperand(0).getValueType() != MVT::f128
3463 || N->getValueType(0) != MVT::i64)
3464 return;
3465 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3466 ? RTLIB::FPTOSINT_F128_I64
3467 : RTLIB::FPTOUINT_F128_I64);
3468
3469 Results.push_back(LowerF128Op(SDValue(N, 0),
3470 DAG,
3471 getLibcallName(libCall),
3472 1));
3473 return;
3474 case ISD::READCYCLECOUNTER: {
3475 assert(Subtarget->hasLeonCycleCounter());
3476 SDValue Lo = DAG.getCopyFromReg(N->getOperand(0), dl, SP::ASR23, MVT::i32);
3477 SDValue Hi = DAG.getCopyFromReg(Lo, dl, SP::G0, MVT::i32);
3478 SDValue Ops[] = { Lo, Hi };
3479 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops);
3480 Results.push_back(Pair);
3481 Results.push_back(N->getOperand(0));
3482 return;
3483 }
3484 case ISD::SINT_TO_FP:
3485 case ISD::UINT_TO_FP:
3486 // Custom lower only if it involves f128 or i64.
3487 if (N->getValueType(0) != MVT::f128
3488 || N->getOperand(0).getValueType() != MVT::i64)
3489 return;
3490
3491 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3492 ? RTLIB::SINTTOFP_I64_F128
3493 : RTLIB::UINTTOFP_I64_F128);
3494
3495 Results.push_back(LowerF128Op(SDValue(N, 0),
3496 DAG,
3497 getLibcallName(libCall),
3498 1));
3499 return;
3500 case ISD::LOAD: {
3501 LoadSDNode *Ld = cast<LoadSDNode>(N);
3502 // Custom handling only for i64: turn i64 load into a v2i32 load,
3503 // and a bitcast.
3504 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3505 return;
3506
3507 SDLoc dl(N);
3508 SDValue LoadRes = DAG.getExtLoad(
3509 Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
3510 Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getBaseAlign(),
3511 Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
3512
3513 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3514 Results.push_back(Res);
3515 Results.push_back(LoadRes.getValue(1));
3516 return;
3517 }
3518 }
3519}
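// Illustrative DAG-level example (not part of the file): on 32-bit SPARC an
// i64 load has an illegal result type, so the ISD::LOAD case above rewrites
//
//   t1: i64,ch = load t0, %ptr
//
// into a legal load plus a bitcast,
//
//   t1: v2i32,ch = load t0, %ptr
//   t2: i64 = bitcast t1
//
// and forwards the chain result so users of the original load are unaffected.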
3520
3521// Override to enable LOAD_STACK_GUARD lowering on Linux.
3522bool SparcTargetLowering::useLoadStackGuardNode(const Module &M) const {
3523 if (!Subtarget->getTargetTriple().isOSLinux())
3524 return TargetLowering::useLoadStackGuardNode(M);
3525 return true;
3526}
3527
3528bool SparcTargetLowering::isFNegFree(EVT VT) const {
3529 if (Subtarget->isVIS3())
3530 return VT == MVT::f32 || VT == MVT::f64;
3531 return false;
3532}
3533
3534bool SparcTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3535 bool ForCodeSize) const {
3536 if (VT != MVT::f32 && VT != MVT::f64)
3537 return false;
3538 if (Subtarget->isVIS() && Imm.isZero())
3539 return true;
3540 if (Subtarget->isVIS3())
3541 return Imm.isExactlyValue(+0.5) || Imm.isExactlyValue(-0.5) ||
3542 Imm.getExactLog2Abs() == -1;
3543 return false;
3544}
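// Informal reading of the predicate above (hedged, not normative): with VIS,
// +0.0/-0.0 are free because VIS provides register-zeroing instructions
// (fzero/fzeros); with VIS3 a few more constants such as +/-0.5 are accepted.
// Any other FP immediate is materialized through the constant pool instead.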
3545
3546bool SparcTargetLowering::isCtlzFast() const { return Subtarget->isVIS3(); }
3547
3548bool SparcTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3549 // We lack a native cttz; however, on 64-bit targets it is cheap to
3550 // implement it in terms of popc.
3551 if (Subtarget->is64Bit() && Subtarget->usePopc())
3552 return true;
3553 // Otherwise, implementing cttz in terms of ctlz is still cheap.
3554 return isCheapToSpeculateCtlz(Ty);
3555}
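// Sketch of the popc-based expansion alluded to above (illustrative; the
// rewrite itself is performed by generic legalization):
//
//   cttz(x) == popc(~x & (x - 1))
//
// e.g. x = 0b1000: x-1 = 0b0111, ~x & (x-1) = 0b0111, popc = 3 == cttz(x).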
3556
3557bool SparcTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3558 EVT VT) const {
3559 return Subtarget->isUA2007() && !Subtarget->useSoftFloat();
3560}
3561
3562void SparcTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3563 SDNode *Node) const {
3564 assert(MI.getOpcode() == SP::SUBCCrr || MI.getOpcode() == SP::SUBCCri);
3565 // If the result is dead, replace it with %g0.
3566 if (!Node->hasAnyUseOfValue(0))
3567 MI.getOperand(0).setReg(SP::G0);
3568}
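// Illustrative note (not upstream code): retargeting a dead SUBCC result to
// %g0 yields the canonical SPARC compare idiom,
//
//   subcc %o0, %o1, %g0    ! the "cmp %o0, %o1" synthetic instruction
//
// since writes to %g0 are discarded and only the condition codes stay live.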
3569
3570Instruction *SparcTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
3571 Instruction *Inst,
3572 AtomicOrdering Ord) const {
3573 bool HasStoreSemantics =
3574 isa<AtomicCmpXchgInst, AtomicRMWInst, StoreInst>(Inst);
3575 if (HasStoreSemantics && isReleaseOrStronger(Ord))
3576 return Builder.CreateFence(AtomicOrdering::Release);
3577 return nullptr;
3578}
3579
3580Instruction *SparcTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
3581 Instruction *Inst,
3582 AtomicOrdering Ord) const {
3583 // V8 loads already come with implicit acquire barrier so there's no need to
3584 // emit it again.
3585 bool HasLoadSemantics = isa<AtomicCmpXchgInst, AtomicRMWInst, LoadInst>(Inst);
3586 if (Subtarget->isV9() && HasLoadSemantics && isAcquireOrStronger(Ord))
3587 return Builder.CreateFence(AtomicOrdering::Acquire);
3588
3589 // SC plain stores would need a trailing full barrier.
3590 if (isa<StoreInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
3591 return Builder.CreateFence(Ord);
3592 return nullptr;
3593}
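// End-to-end illustration of the two fence hooks (not upstream code): a
// sequentially consistent IR store on V9,
//
//   store atomic i32 %v, ptr %p seq_cst, align 4
//
// is bracketed as "fence release; st; fence seq_cst" -- emitLeadingFence
// supplies the release fence (store semantics, release-or-stronger) and
// emitTrailingFence supplies the trailing full barrier via the SC-store case.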