/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.oracle.graal.lir.amd64;

import jdk.internal.jvmci.amd64.*;
import jdk.internal.jvmci.code.*;
import jdk.internal.jvmci.code.CompilationResult.*;
import jdk.internal.jvmci.common.*;
import jdk.internal.jvmci.meta.*;
import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
import static jdk.internal.jvmci.code.ValueUtil.*;

import com.oracle.graal.asm.*;
import com.oracle.graal.asm.amd64.*;
import com.oracle.graal.asm.amd64.AMD64Address.*;
import com.oracle.graal.asm.amd64.AMD64Assembler.*;
import com.oracle.graal.compiler.common.calc.*;
import com.oracle.graal.lir.*;
import com.oracle.graal.lir.StandardOp.BlockEndOp;
import com.oracle.graal.lir.SwitchStrategy.BaseSwitchClosure;
import com.oracle.graal.lir.asm.*;

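/**
 * AMD64 LIR instructions for control flow: returns, conditional branches, switches and
 * conditional moves.
 */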
public class AMD64ControlFlow {

    public static final class ReturnOp extends AMD64BlockEndOp implements BlockEndOp {
        public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
        @Use({REG, ILLEGAL}) protected Value x;

        public ReturnOp(Value x) {
            super(TYPE);
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.frameContext.leave(crb);
            masm.ret(0);
        }
    }

    public static class BranchOp extends AMD64BlockEndOp implements StandardOp.BranchOp {
        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
        protected final ConditionFlag condition;
        protected final LabelRef trueDestination;
        protected final LabelRef falseDestination;

        private final double trueDestinationProbability;

        public BranchOp(Condition condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(intCond(condition), trueDestination, falseDestination, trueDestinationProbability);
        }

        public BranchOp(ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(TYPE, condition, trueDestination, falseDestination, trueDestinationProbability);
        }

        protected BranchOp(LIRInstructionClass<? extends BranchOp> c, ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(c);
            this.condition = condition;
            this.trueDestination = trueDestination;
            this.falseDestination = falseDestination;
            this.trueDestinationProbability = trueDestinationProbability;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            /*
             * The strategy for emitting jumps is: If either trueDestination or falseDestination is
             * the successor block, assume the block scheduler did the correct thing and jcc to the
             * other. Otherwise, we need a jcc followed by a jmp. Use the branch probability to make
             * sure it is more likely to branch on the jcc (= less likely to execute both the jcc
             * and the jmp instead of just the jcc). In the case of loops, that means the jcc is the
             * back-edge.
             */
            if (crb.isSuccessorEdge(trueDestination)) {
                jcc(masm, true, falseDestination);
            } else if (crb.isSuccessorEdge(falseDestination)) {
                jcc(masm, false, trueDestination);
            } else if (trueDestinationProbability < 0.5) {
                jcc(masm, true, falseDestination);
                masm.jmp(trueDestination.label());
            } else {
                jcc(masm, false, trueDestination);
                masm.jmp(falseDestination.label());
            }
        }

        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            masm.jcc(negate ? condition.negate() : condition, target.label());
        }
    }

    public static final class FloatBranchOp extends BranchOp {
        public static final LIRInstructionClass<FloatBranchOp> TYPE = LIRInstructionClass.create(FloatBranchOp.class);
        protected boolean unorderedIsTrue;

        public FloatBranchOp(Condition condition, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(TYPE, floatCond(condition), trueDestination, falseDestination, trueDestinationProbability);
            this.unorderedIsTrue = unorderedIsTrue;
        }

        @Override
        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            floatJcc(masm, negate ? condition.negate() : condition, negate ? !unorderedIsTrue : unorderedIsTrue, target.label());
        }
    }

    public static final class StrategySwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
        @Use({CONST}) protected JavaConstant[] keyConstants;
        private final LabelRef[] keyTargets;
        private LabelRef defaultTarget;
        @Alive({REG}) protected Value key;
        @Temp({REG, ILLEGAL}) protected Value scratch;
        private final SwitchStrategy strategy;

        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            super(TYPE);
            this.strategy = strategy;
            this.keyConstants = strategy.keyConstants;
            this.keyTargets = keyTargets;
            this.defaultTarget = defaultTarget;
            this.key = key;
            this.scratch = scratch;
            assert keyConstants.length == keyTargets.length;
            assert keyConstants.length == strategy.keyProbabilities.length;
            assert (scratch.getKind() == Kind.Illegal) == (key.getKind() == Kind.Int || key.getKind() == Kind.Long);
        }

        @Override
        public void emitCode(final CompilationResultBuilder crb, final AMD64MacroAssembler masm) {
            final Register keyRegister = asRegister(key);

            BaseSwitchClosure closure = new BaseSwitchClosure(crb, masm, keyTargets, defaultTarget) {
                @Override
                protected void conditionalJump(int index, Condition condition, Label target) {
                    switch (key.getKind()) {
                        case Int:
                            if (crb.codeCache.needsDataPatch(keyConstants[index])) {
                                crb.recordInlineDataInCode(keyConstants[index]);
                            }
                            long lc = keyConstants[index].asLong();
                            assert NumUtil.isInt(lc);
                            masm.cmpl(keyRegister, (int) lc);
                            break;
                        case Long:
                            masm.cmpq(keyRegister, (AMD64Address) crb.asLongConstRef(keyConstants[index]));
                            break;
                        case Object:
                            assert condition == Condition.EQ || condition == Condition.NE;
                            AMD64Move.move(crb, masm, scratch, keyConstants[index]);
                            masm.cmpptr(keyRegister, asObjectReg(scratch));
                            break;
                        default:
                            throw new JVMCIError("switch only supported for int, long and object");
                    }
                    masm.jcc(intCond(condition), target);
                }
            };
            strategy.run(closure);
        }
    }

    public static final class TableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
        private final int lowKey;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Use protected Value index;
        @Temp({REG, HINT}) protected Value idxScratch;
        @Temp protected Value scratch;

        public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) {
            super(TYPE);
            this.lowKey = lowKey;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.index = index;
            this.scratch = scratch;
            this.idxScratch = idxScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register indexReg = asIntReg(index);
            Register idxScratchReg = asIntReg(idxScratch);
            Register scratchReg = asLongReg(scratch);

            if (!indexReg.equals(idxScratchReg)) {
                masm.movl(idxScratchReg, indexReg);
            }

            // Compare index against jump table bounds
            int highKey = lowKey + targets.length - 1;
            if (lowKey != 0) {
                // subtract the low value from the switch value
                masm.subl(idxScratchReg, lowKey);
                masm.cmpl(idxScratchReg, highKey - lowKey);
            } else {
                masm.cmpl(idxScratchReg, highKey);
            }

            // Jump to default target if index is not within the jump table
            if (defaultTarget != null) {
                masm.jcc(ConditionFlag.Above, defaultTarget.label());
            }

            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // Load jump table entry into scratch and jump to it
            masm.movslq(idxScratchReg, new AMD64Address(scratchReg, idxScratchReg, Scale.Times4, 0));
            masm.addq(scratchReg, idxScratchReg);
            masm.jmp(scratchReg);

            // Insert padding so that the jump table address is 4-byte aligned
            if ((masm.position() & 0x3) != 0) {
                masm.nop(4 - (masm.position() & 0x3));
            }

            // Patch LEA instruction above now that we know the position of the jump table
            // TODO this is ugly and should be done differently
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
            for (LabelRef target : targets) {
                Label label = target.label();
                int offsetToJumpTableBase = masm.position() - jumpTablePos;
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    label.addPatchAt(masm.position());

                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, lowKey, highKey, 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }

    @Opcode("CMOVE")
    public static final class CondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
        @Def({REG, HINT}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Use({REG, STACK, CONST}) protected Value falseValue;
        private final ConditionFlag condition;

        public CondMoveOp(Variable result, Condition condition, AllocatableValue trueValue, Value falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, false, condition, false, trueValue, falseValue);
        }
    }

    @Opcode("CMOVE")
    public static final class FloatCondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondMoveOp> TYPE = LIRInstructionClass.create(FloatCondMoveOp.class);
        @Def({REG}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Alive({REG}) protected Value falseValue;
        private final ConditionFlag condition;
        private final boolean unorderedIsTrue;

        public FloatCondMoveOp(Variable result, Condition condition, boolean unorderedIsTrue, Variable trueValue, Variable falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
            this.unorderedIsTrue = unorderedIsTrue;
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, true, condition, unorderedIsTrue, trueValue, falseValue);
        }
    }

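    /**
     * Emits a conditional jump for a floating-point comparison. An unordered result of
     * ucomiss/ucomisd sets the parity flag, so when the required behavior on unordered inputs
     * differs from what the condition flag itself yields, an extra jump on ConditionFlag.Parity
     * either takes the branch directly or skips the main jcc.
     */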
    private static void floatJcc(AMD64MacroAssembler masm, ConditionFlag condition, boolean unorderedIsTrue, Label label) {
        Label endLabel = new Label();
        if (unorderedIsTrue && !trueOnUnordered(condition)) {
            masm.jcc(ConditionFlag.Parity, label);
        } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
            masm.jccb(ConditionFlag.Parity, endLabel);
        }
        masm.jcc(condition, label);
        masm.bind(endLabel);
    }

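    /**
     * Emits a conditional move by first moving falseValue into result and then conditionally
     * overwriting it with trueValue. For floating-point conditions, an additional cmov on
     * ConditionFlag.Parity corrects the result when the comparison was unordered (NaN input).
     */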
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, boolean isFloat, ConditionFlag condition, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        // check that we don't overwrite an input operand before it is used.
        assert !result.equals(trueValue);

        AMD64Move.move(crb, masm, result, falseValue);
        cmove(crb, masm, result, condition, trueValue);

        if (isFloat) {
            if (unorderedIsTrue && !trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, trueValue);
            } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, falseValue);
            }
        }
    }

    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, ConditionFlag cond, Value other) {
        if (isRegister(other)) {
            assert !asRegister(other).equals(asRegister(result)) : "other already overwritten by previous move";
            switch (other.getKind()) {
                case Boolean:
                case Byte:
                case Short:
                case Char:
                case Int:
                    masm.cmovl(cond, asRegister(result), asRegister(other));
                    break;
                case Long:
                    masm.cmovq(cond, asRegister(result), asRegister(other));
                    break;
                default:
                    throw JVMCIError.shouldNotReachHere();
            }
        } else {
            AMD64Address addr = (AMD64Address) crb.asAddress(other);
            switch (other.getKind()) {
                case Boolean:
                case Byte:
                case Short:
                case Char:
                case Int:
                    masm.cmovl(cond, asRegister(result), addr);
                    break;
                case Long:
                    masm.cmovq(cond, asRegister(result), addr);
                    break;
                default:
                    throw JVMCIError.shouldNotReachHere();
            }
        }
    }

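    /**
     * Maps a condition to the AMD64 condition flag used after an integer comparison: LT, LE, GE
     * and GT use the signed flags, while BT, BE, AE and AT use the unsigned ones.
     */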
    private static ConditionFlag intCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Less;
            case LE:
                return ConditionFlag.LessEqual;
            case GE:
                return ConditionFlag.GreaterEqual;
            case GT:
                return ConditionFlag.Greater;
            case BE:
                return ConditionFlag.BelowEqual;
            case AE:
                return ConditionFlag.AboveEqual;
            case AT:
                return ConditionFlag.Above;
            case BT:
                return ConditionFlag.Below;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
    }

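    /**
     * Maps a condition to the AMD64 condition flag used after a floating-point comparison.
     * ucomiss/ucomisd report their result in ZF, PF and CF only, so the unsigned flags are used.
     */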
    private static ConditionFlag floatCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Below;
            case LE:
                return ConditionFlag.BelowEqual;
            case GE:
                return ConditionFlag.AboveEqual;
            case GT:
                return ConditionFlag.Above;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
    }

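    /**
     * Returns true if the given condition flag evaluates to true after an unordered
     * floating-point comparison, which sets ZF, PF and CF and clears OF and SF.
     */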
    private static boolean trueOnUnordered(ConditionFlag condition) {
        switch (condition) {
            case AboveEqual:
            case NotEqual:
            case Above:
            case Less:
            case Overflow:
                return false;
            case Equal:
            case BelowEqual:
            case Below:
            case GreaterEqual:
            case NoOverflow:
                return true;
            default:
                throw JVMCIError.shouldNotReachHere();
        }
    }
}