/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jsion_caches_h__
#define jsion_caches_h__
#include "IonCode.h"
#include "TypeOracle.h"
#include "Registers.h"
class JSFunction;
class JSScript;
namespace js {
namespace ion {
#define IONCACHE_KIND_LIST(_) \
_(GetProperty) \
_(SetProperty) \
_(GetElement) \
_(BindName) \
_(Name) \
_(CallsiteClone)
// Forward declarations of Cache kinds.
#define FORWARD_DECLARE(kind) class kind##IC;
IONCACHE_KIND_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
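// For reference, the expansion above amounts to:
//
//   class GetPropertyIC; class SetPropertyIC; class GetElementIC;
//   class BindNameIC; class NameIC; class CallsiteCloneIC;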
class IonCacheVisitor
{
public:
#define VISIT_INS(op) \
virtual bool visit##op##IC(CodeGenerator *codegen, op##IC *) { \
JS_NOT_REACHED("NYI: " #op "IC"); \
return false; \
}
IONCACHE_KIND_LIST(VISIT_INS)
#undef VISIT_INS
};
// Common structure encoding the state of a polymorphic inline cache contained
// in the code for an IonScript. IonCaches are used for polymorphic operations
// where multiple implementations may be required.
//
// The cache is initially compiled as a patchable jump to an out of line
// fragment which invokes a cache function to perform the operation. The cache
// function may generate a stub to perform the operation in certain cases
// (e.g. a particular shape for an input object), patch the cache's jump to
// that stub and patch any failure conditions in the stub to jump back to the
// cache fragment. When those failure conditions are hit, the cache function
// may attach new stubs, forming a daisy chain of tests for how to perform the
// operation in different circumstances.
//
// Eventually, if too many stubs are generated, the cache function may disable
// the cache by generating a stub that makes a call and performs the operation
// within the VM.
//
// While calls may be made to the cache function and other VM functions, the
// cache may still be treated as pure during optimization passes, such that
// LICM and GVN may be performed on operations around the cache as if the
// operation cannot reenter scripted code through an Invoke() or otherwise have
// unexpected behavior. This restricts the sorts of stubs which the cache can
// generate or the behaviors which called functions can have, and if a called
// function performs a possibly impure operation then the operation will be
// marked as such and the calling script will be recompiled.
//
// Similarly, despite the presence of functions and multiple stubs generated
// for a cache, the cache itself may be marked as idempotent and become hoisted
// or coalesced by LICM or GVN. This also constrains the stubs which can be
// generated for the cache.
//
// * IonCache usage
//
// IonCache is the base structure of an inline cache, which generates code stubs
// dynamically and attaches them to an IonScript.
//
// A cache must at least provide a static update function, which usually takes
// a JSContext*, followed by the cache index. The remaining arguments of the
// update function usually correspond to the register inputs of the cache,
// as it must perform the same operation as any of the stubs that the cache
// might produce. The update function call is handled by the visit function of
// the CodeGenerator corresponding to this IC.
//
// The CodeGenerator visit function, unlike other visit functions, takes
// two arguments. The first is the OutOfLineUpdateCache, which stores the LIR
// instruction. The second is the IC object. This function is called
// once the IC is registered with the addCache function of CodeGeneratorShared.
//
// To register a cache, you must call the addCache function as follows:
//
// MyCodeIC cache(inputReg1, inputValueReg2, outputReg);
// if (!addCache(lir, allocateCache(cache)))
// return false;
//
// Once the cache is allocated with the allocateCache function, any
// modification made to the local cache object will be ignored.
//
// The addCache function will produce a patchable jump at the location where
// it is called. This jump will execute generated stubs and fall back to the
// code of the visitMyCodeIC function if no stub matches.
//
// Warning: As the addCache function falls back on a VM call, calls to
// addCache should not be on the same path as another VM call or on the same
// path as another addCache, as this is not supported by the invalidation
// procedure.
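//
// Putting it together, a hypothetical MyCodeIC would be registered from the
// LIR visit function roughly as follows (a sketch only; LMyCode and the
// register names are illustrative):
//
//   bool
//   CodeGenerator::visitMyCode(LMyCode *lir)
//   {
//       MyCodeIC cache(inputReg1, inputValueReg2, outputReg);
//       // allocateCache copies the cache into the IonScript, so later
//       // modifications of the local object are ignored.
//       return addCache(lir, allocateCache(cache));
//   }
//
// while visitMyCodeIC receives the OutOfLineUpdateCache and the IC object and
// emits the call to the static MyCodeIC::update function (see UpdateInfo in
// CACHE_HEADER below).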
class IonCache
{
public:
enum Kind {
# define DEFINE_CACHEKINDS(ickind) Cache_##ickind,
IONCACHE_KIND_LIST(DEFINE_CACHEKINDS)
# undef DEFINE_CACHEKINDS
Cache_Invalid
};
// Cache testing and cast.
# define CACHEKIND_CASTS(ickind) \
bool is##ickind() const { \
return kind() == Cache_##ickind; \
} \
inline ickind##IC &to##ickind();
IONCACHE_KIND_LIST(CACHEKIND_CASTS)
# undef CACHEKIND_CASTS
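// For GetProperty, for example, the casts above expand to:
//
//   bool isGetProperty() const { return kind() == Cache_GetProperty; }
//   inline GetPropertyIC &toGetProperty();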
virtual Kind kind() const = 0;
virtual bool accept(CodeGenerator *codegen, IonCacheVisitor *visitor) = 0;
public:
static const char *CacheName(Kind kind);
protected:
bool pure_ : 1;
bool idempotent_ : 1;
bool disabled_ : 1;
size_t stubCount_ : 5;
CodeLocationJump initialJump_;
CodeLocationJump lastJump_;
CodeLocationLabel fallbackLabel_;
// Offset from the initial jump to the rejoin label.
#ifdef JS_CPU_ARM
static const size_t REJOIN_LABEL_OFFSET = 4;
#else
static const size_t REJOIN_LABEL_OFFSET = 0;
#endif
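// setInlineJump below asserts this invariant: the rejoin label must sit
// exactly REJOIN_LABEL_OFFSET bytes past the initial jump.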
// Location of this operation, NULL for idempotent caches.
JSScript *script;
jsbytecode *pc;
private:
static const size_t MAX_STUBS;
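// Note: stubCount_ is a 5-bit field, so it can represent at most 31 stubs;
// MAX_STUBS (16, defined in IonCaches.cpp) keeps stub attachment well below
// the wrap-around point that incrementStubCount asserts against.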
void incrementStubCount() {
// The IC should stop generating stubs before wrapping stubCount.
stubCount_++;
JS_ASSERT(stubCount_);
}
CodeLocationLabel fallbackLabel() const {
return fallbackLabel_;
}
CodeLocationLabel rejoinLabel() const {
uint8_t *ptr = initialJump_.raw();
#ifdef JS_CPU_ARM
uint32_t i = 0;
while (i < REJOIN_LABEL_OFFSET)
ptr = Assembler::nextInstruction(ptr, &i);
#endif
return CodeLocationLabel(ptr);
}
public:
IonCache()
: pure_(false),
idempotent_(false),
disabled_(false),
stubCount_(0),
initialJump_(),
lastJump_(),
fallbackLabel_(),
script(NULL),
pc(NULL)
{
}
void disable();
inline bool isDisabled() const {
return disabled_;
}
// Set the initial jump state of the cache. The initialJump is the inline
// jump that will point to out-of-line code (such as the slow path, or
// stubs), and the rejoinLabel is the position that all out-of-line paths
// will rejoin to.
void setInlineJump(CodeOffsetJump initialJump, CodeOffsetLabel rejoinLabel) {
initialJump_ = initialJump;
lastJump_ = initialJump;
JS_ASSERT(rejoinLabel.offset() == initialJump.offset() + REJOIN_LABEL_OFFSET);
}
// Set the initial 'out-of-line' jump state of the cache. The fallbackLabel is
// the location of the out-of-line update (slow) path; the exitJump of the
// last generated stub is patched to jump to this location.
void setFallbackLabel(CodeOffsetLabel fallbackLabel) {
fallbackLabel_ = fallbackLabel;
}
// Update labels once the code is copied and finalized.
void updateBaseAddress(IonCode *code, MacroAssembler &masm);
// Reset the cache around garbage collection.
void reset();
bool canAttachStub() const {
return stubCount_ < MAX_STUBS;
}
enum LinkStatus {
LINK_ERROR,
CACHE_FLUSHED,
LINK_GOOD
};
// Use the Linker to link the generated code. If any monitoring/allocation
// caused an invalidation of the running ion script, this function returns
// CACHE_FLUSHED. In case of an allocation failure, this function returns
// LINK_ERROR.
LinkStatus linkCode(JSContext *cx, MacroAssembler &masm, IonScript *ion, IonCode **code);
// Fixup variables and update jumps in the list of stubs. Increment the
// number of attached stubs accordingly.
void attachStub(MacroAssembler &masm, IonCode *code, CodeOffsetJump &rejoinOffset,
CodeOffsetJump *exitOffset, CodeOffsetLabel *stubOffset = NULL);
// Combine both linkCode and attachStub into one function. In addition, it
// produces a spew augmented with the attachKind string.
bool linkAndAttachStub(JSContext *cx, MacroAssembler &masm, IonScript *ion,
const char *attachKind, CodeOffsetJump &rejoinOffset,
CodeOffsetJump *exitOffset, CodeOffsetLabel *stubOffset = NULL);
bool pure() {
return pure_;
}
bool idempotent() {
return idempotent_;
}
void setIdempotent() {
JS_ASSERT(!idempotent_);
JS_ASSERT(!script);
JS_ASSERT(!pc);
idempotent_ = true;
}
void setScriptedLocation(RawScript script, jsbytecode *pc) {
JS_ASSERT(!idempotent_);
this->script = script;
this->pc = pc;
}
void getScriptedLocation(MutableHandleScript pscript, jsbytecode **ppc) {
pscript.set(script);
*ppc = pc;
}
};
// Define the cache kind and pre-declare data structures used for calling inline
// caches.
#define CACHE_HEADER(ickind) \
Kind kind() const { \
return IonCache::Cache_##ickind; \
} \
\
bool accept(CodeGenerator *codegen, IonCacheVisitor *visitor) { \
return visitor->visit##ickind##IC(codegen, this); \
} \
\
static const VMFunction UpdateInfo;
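// For GetProperty, for example, CACHE_HEADER(GetProperty) supplies:
//
//   Kind kind() const { return IonCache::Cache_GetProperty; }
//   bool accept(CodeGenerator *codegen, IonCacheVisitor *visitor) {
//       return visitor->visitGetPropertyIC(codegen, this);
//   }
//   static const VMFunction UpdateInfo;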
// Subclasses of IonCache for the various kinds of caches. Each subclass adds
// the registers and other state its stubs need.
// Abstract helper class that holds state while generating readSlot and
// callGetter stubs.
class GenerateStubHelper
{
protected:
MacroAssembler &masm_;
public:
GenerateStubHelper(MacroAssembler &masm)
: masm_(masm)
{ }
virtual void branchExit(Assembler::Condition cond, Address addr, ImmGCPtr ptr) = 0;
virtual void jumpRejoin() = 0;
virtual void jumpExit() = 0;
virtual void bindFailures() = 0;
virtual void pushStubCodePatch(const ImmWord &word) = 0;
};
// Base class shared by GetPropertyIC and GetElementIC.
class ReadSlotCache : public IonCache
{
protected:
void generateReadSlot(JSContext *cx, MacroAssembler &masm, JSObject *obj, PropertyName *propName,
JSObject *holder, HandleShape shape, Register object, TypedOrValueRegister output,
GenerateStubHelper &helper, Label *nonRepatchFailures = NULL);
};
class GetPropertyIC : public ReadSlotCache
{
protected:
// Registers live after the cache, excluding output registers. The initial
// value of these registers must be preserved by the cache.
RegisterSet liveRegs_;
Register object_;
PropertyName *name_;
TypedOrValueRegister output_;
bool allowGetters_ : 1;
bool hasArrayLengthStub_ : 1;
bool hasTypedArrayLengthStub_ : 1;
bool generateCallGetter(JSContext *cx, MacroAssembler &masm, JSObject *obj,
PropertyName *propName, JSObject *holder, HandleShape shape,
RegisterSet &liveRegs, Register object, TypedOrValueRegister output,
void *returnAddr, jsbytecode *pc,
GenerateStubHelper &helper, Label *nonRepatchFailures = NULL);
public:
GetPropertyIC(RegisterSet liveRegs,
Register object, PropertyName *name,
TypedOrValueRegister output,
bool allowGetters)
: liveRegs_(liveRegs),
object_(object),
name_(name),
output_(output),
allowGetters_(allowGetters),
hasArrayLengthStub_(false),
hasTypedArrayLengthStub_(false)
{
}
CACHE_HEADER(GetProperty)
Register object() const {
return object_;
}
PropertyName *name() const {
return name_;
}
TypedOrValueRegister output() const {
return output_;
}
bool allowGetters() const {
return allowGetters_;
}
bool hasArrayLengthStub() const {
return hasArrayLengthStub_;
}
bool hasTypedArrayLengthStub() const {
return hasTypedArrayLengthStub_;
}
bool attachReadSlot(JSContext *cx, IonScript *ion, JSObject *obj, JSObject *holder,
HandleShape shape);
bool attachCallGetter(JSContext *cx, IonScript *ion, JSObject *obj, JSObject *holder,
HandleShape shape,
const SafepointIndex *safepointIndex, void *returnAddr);
bool attachArrayLength(JSContext *cx, IonScript *ion, JSObject *obj);
bool attachTypedArrayLength(JSContext *cx, IonScript *ion, JSObject *obj);
static bool update(JSContext *cx, size_t cacheIndex, HandleObject obj, MutableHandleValue vp);
};
class SetPropertyIC : public IonCache
{
protected:
// Registers live after the cache, excluding output registers. The initial
// value of these registers must be preserved by the cache.
RegisterSet liveRegs_;
Register object_;
PropertyName *name_;
ConstantOrRegister value_;
bool isSetName_;
bool strict_;
public:
SetPropertyIC(RegisterSet liveRegs, Register object, PropertyName *name,
ConstantOrRegister value, bool isSetName, bool strict)
: liveRegs_(liveRegs),
object_(object),
name_(name),
value_(value),
isSetName_(isSetName),
strict_(strict)
{
}
CACHE_HEADER(SetProperty)
Register object() const {
return object_;
}
PropertyName *name() const {
return name_;
}
ConstantOrRegister value() const {
return value_;
}
bool isSetName() const {
return isSetName_;
}
bool strict() const {
return strict_;
}
bool attachNativeExisting(JSContext *cx, IonScript *ion, HandleObject obj, HandleShape shape);
bool attachSetterCall(JSContext *cx, IonScript *ion, HandleObject obj,
HandleObject holder, HandleShape shape, void *returnAddr);
bool attachNativeAdding(JSContext *cx, IonScript *ion, JSObject *obj, HandleShape oldshape,
HandleShape newshape, HandleShape propshape);
static bool
update(JSContext *cx, size_t cacheIndex, HandleObject obj, HandleValue value);
};
class GetElementIC : public ReadSlotCache
{
protected:
Register object_;
ConstantOrRegister index_;
TypedOrValueRegister output_;
bool monitoredResult_ : 1;
bool hasDenseStub_ : 1;
public:
GetElementIC(Register object, ConstantOrRegister index,
TypedOrValueRegister output, bool monitoredResult)
: object_(object),
index_(index),
output_(output),
monitoredResult_(monitoredResult),
hasDenseStub_(false)
{
}
CACHE_HEADER(GetElement)
Register object() const {
return object_;
}
ConstantOrRegister index() const {
return index_;
}
TypedOrValueRegister output() const {
return output_;
}
bool monitoredResult() const {
return monitoredResult_;
}
bool hasDenseStub() const {
return hasDenseStub_;
}
void setHasDenseStub() {
JS_ASSERT(!hasDenseStub());
hasDenseStub_ = true;
}
bool attachGetProp(JSContext *cx, IonScript *ion, HandleObject obj, const Value &idval, HandlePropertyName name);
bool attachDenseElement(JSContext *cx, IonScript *ion, JSObject *obj, const Value &idval);
bool attachTypedArrayElement(JSContext *cx, IonScript *ion, JSObject *obj, const Value &idval);
static bool
update(JSContext *cx, size_t cacheIndex, HandleObject obj, HandleValue idval,
MutableHandleValue vp);
};
class BindNameIC : public IonCache
{
protected:
Register scopeChain_;
PropertyName *name_;
Register output_;
public:
BindNameIC(Register scopeChain, PropertyName *name, Register output)
: scopeChain_(scopeChain),
name_(name),
output_(output)
{
}
CACHE_HEADER(BindName)
Register scopeChainReg() const {
return scopeChain_;
}
HandlePropertyName name() const {
return HandlePropertyName::fromMarkedLocation(&name_);
}
Register outputReg() const {
return output_;
}
bool attachGlobal(JSContext *cx, IonScript *ion, JSObject *scopeChain);
bool attachNonGlobal(JSContext *cx, IonScript *ion, JSObject *scopeChain, JSObject *holder);
static JSObject *
update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain);
};
class NameIC : public IonCache
{
protected:
bool typeOf_;
Register scopeChain_;
PropertyName *name_;
TypedOrValueRegister output_;
public:
NameIC(bool typeOf,
Register scopeChain, PropertyName *name,
TypedOrValueRegister output)
: typeOf_(typeOf),
scopeChain_(scopeChain),
name_(name),
output_(output)
{
}
CACHE_HEADER(Name)
Register scopeChainReg() const {
return scopeChain_;
}
HandlePropertyName name() const {
return HandlePropertyName::fromMarkedLocation(&name_);
}
TypedOrValueRegister outputReg() const {
return output_;
}
bool isTypeOf() const {
return typeOf_;
}
bool attach(JSContext *cx, IonScript *ion, HandleObject scopeChain, HandleObject obj,
HandleShape shape);
static bool
update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain, MutableHandleValue vp);
};
class CallsiteCloneIC : public IonCache
{
protected:
Register callee_;
Register output_;
JSScript *callScript_;
jsbytecode *callPc_;
public:
CallsiteCloneIC(Register callee, JSScript *callScript, jsbytecode *callPc, Register output)
: callee_(callee),
output_(output),
callScript_(callScript),
callPc_(callPc)
{
}
CACHE_HEADER(CallsiteClone)
Register calleeReg() const {
return callee_;
}
HandleScript callScript() const {
return HandleScript::fromMarkedLocation(&callScript_);
}
jsbytecode *callPc() const {
return callPc_;
}
Register outputReg() const {
return output_;
}
bool attach(JSContext *cx, IonScript *ion, HandleFunction original, HandleFunction clone);
static JSObject *update(JSContext *cx, size_t cacheIndex, HandleObject callee);
};
#undef CACHE_HEADER
// Implement cache casts now that the compiler can see the inheritance.
#define CACHE_CASTS(ickind) \
ickind##IC &IonCache::to##ickind() \
{ \
JS_ASSERT(is##ickind()); \
return *static_cast<ickind##IC *>(this); \
}
IONCACHE_KIND_LIST(CACHE_CASTS)
#undef CACHE_CASTS
} // namespace ion
} // namespace js
#endif // jsion_caches_h__
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/DebugOnly.h"
#include "CodeGenerator.h"
#include "Ion.h"
#include "IonCaches.h"
#include "IonLinker.h"
#include "IonSpewer.h"
#include "VMFunctions.h"
#include "vm/Shape.h"
#include "jsinterpinlines.h"
#include "IonFrames-inl.h"
using namespace js;
using namespace js::ion;
using mozilla::DebugOnly;
void
CodeLocationJump::repoint(IonCode *code, MacroAssembler *masm)
{
JS_ASSERT(!absolute_);
size_t new_off = (size_t)raw_;
#ifdef JS_SMALL_BRANCH
size_t jumpTableEntryOffset = reinterpret_cast<size_t>(jumpTableEntry_);
#endif
if (masm != NULL) {
#ifdef JS_CPU_X64
JS_ASSERT((uint64_t)raw_ <= UINT32_MAX);
#endif
new_off = masm->actualOffset((uintptr_t)raw_);
#ifdef JS_SMALL_BRANCH
jumpTableEntryOffset = masm->actualIndex(jumpTableEntryOffset);
#endif
}
raw_ = code->raw() + new_off;
#ifdef JS_SMALL_BRANCH
jumpTableEntry_ = Assembler::PatchableJumpAddress(code, (size_t) jumpTableEntryOffset);
#endif
setAbsolute();
}
void
CodeLocationLabel::repoint(IonCode *code, MacroAssembler *masm)
{
JS_ASSERT(!absolute_);
size_t new_off = (size_t)raw_;
if (masm != NULL) {
#ifdef JS_CPU_X64
JS_ASSERT((uint64_t)raw_ <= UINT32_MAX);
#endif
new_off = masm->actualOffset((uintptr_t)raw_);
}
JS_ASSERT(new_off < code->instructionsSize());
raw_ = code->raw() + new_off;
setAbsolute();
}
void
CodeOffsetLabel::fixup(MacroAssembler *masm)
{
offset_ = masm->actualOffset(offset_);
}
void
CodeOffsetJump::fixup(MacroAssembler *masm)
{
offset_ = masm->actualOffset(offset_);
#ifdef JS_SMALL_BRANCH
jumpTableIndex_ = masm->actualIndex(jumpTableIndex_);
#endif
}
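// Map a cache Kind back to its name for spew output. The NAME X-macro below
// expands to { "GetProperty", "SetProperty", "GetElement", "BindName",
// "Name", "CallsiteClone" }, in the same order as the Kind enum, so indexing
// by |kind| is safe for any valid kind.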
const char *
IonCache::CacheName(IonCache::Kind kind)
{
static const char *names[] =
{
#define NAME(x) #x,
IONCACHE_KIND_LIST(NAME)
#undef NAME
};
return names[kind];
}
IonCache::LinkStatus
IonCache::linkCode(JSContext *cx, MacroAssembler &masm, IonScript *ion, IonCode **code)
{
Linker linker(masm);
*code = linker.newCode(cx);
if (!*code)
return LINK_ERROR;
if (ion->invalidated())
return CACHE_FLUSHED;
return LINK_GOOD;
}
const size_t IonCache::MAX_STUBS = 16;
// Value used instead of the IonCode self-reference of generated stubs. This
// value is needed for marking calls made inside stubs. It is replaced by the
// attachStub function after the IonCode is allocated. The self-reference is
// used to keep the stub path alive even if the IonScript is invalidated or
// the IC is flushed.
const ImmWord STUB_ADDR = ImmWord(uintptr_t(0xdeadc0de));
void
IonCache::attachStub(MacroAssembler &masm, IonCode *code, CodeOffsetJump &rejoinOffset,
CodeOffsetJump *exitOffset, CodeOffsetLabel *stubLabel)
{
JS_ASSERT(canAttachStub());
incrementStubCount();
rejoinOffset.fixup(&masm);
CodeLocationJump rejoinJump(code, rejoinOffset);
// Update the success path to continue after the IC initial jump.
PatchJump(rejoinJump, rejoinLabel());
// Patch the previous exitJump of the last stub, or the jump from the
// codeGen, to jump into the newly allocated code.
PatchJump(lastJump_, CodeLocationLabel(code));
// If no exitOffset is given, we are producing a terminal stub which can no
// longer go back into the update function.
if (exitOffset) {
exitOffset->fixup(&masm);
CodeLocationJump exitJump(code, *exitOffset);
// When the last stub fails, it falls back to the OOL call, which can
// produce a new stub.
PatchJump(exitJump, fallbackLabel());
// Next time we generate a stub, we will patch the exitJump to try the
// new stub.
lastJump_ = exitJump;
}
// Replace the STUB_ADDR constant with the address of the generated stub, so
// that it can be kept alive even if the cache is flushed (see
// MarkIonExitFrame).
if (stubLabel) {
stubLabel->fixup(&masm);
Assembler::patchDataWithValueCheck(CodeLocationLabel(code, *stubLabel),
ImmWord(uintptr_t(code)), STUB_ADDR);
}
}
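// After a few attachStub calls, control flow through the cache looks roughly
// like this (a sketch; S1..Sn are the attached stubs):
//
//   inline site: initialJump_ --> S1 --> S2 --> ... --> Sn --> fallbackLabel_
//
// Each stub's exit jump tries the next stub (lastJump_ always points at the
// most recent exit jump), every success path jumps back to rejoinLabel(), and
// the final exit falls back to the out-of-line update call.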
bool
IonCache::linkAndAttachStub(JSContext *cx, MacroAssembler &masm, IonScript *ion,
const char *attachKind, CodeOffsetJump &rejoinOffset,
CodeOffsetJump *exitOffset, CodeOffsetLabel *stubLabel)
{
IonCode *code = NULL;
LinkStatus status = linkCode(cx, masm, ion, &code);
if (status != LINK_GOOD)
return status != LINK_ERROR;
attachStub(masm, code, rejoinOffset, exitOffset, stubLabel);
IonSpew(IonSpew_InlineCaches, "Generated %s %s stub at %p",
attachKind, CacheName(kind()), code->raw());
return true;
}
static bool
IsCacheableListBase(JSObject *obj)
{
if (!obj->isProxy())
return false;
BaseProxyHandler *handler = GetProxyHandler(obj);
if (handler->family() != GetListBaseHandlerFamily())
return false;
if (obj->numFixedSlots() <= GetListBaseExpandoSlot())
return false;
return true;
}
static void
GeneratePrototypeGuards(JSContext *cx, MacroAssembler &masm, JSObject *obj, JSObject *holder,
Register objectReg, Register scratchReg, Label *failures)
{
JS_ASSERT(obj != holder);
if (obj->hasUncacheableProto()) {
// Note: objectReg and scratchReg may be the same register, so we cannot
// use objectReg in the rest of this function.
masm.loadPtr(Address(objectReg, JSObject::offsetOfType()), scratchReg);
Address proto(scratchReg, offsetof(types::TypeObject, proto));
masm.branchPtr(Assembler::NotEqual, proto, ImmGCPtr(obj->getProto()), failures);
}
JSObject *pobj = IsCacheableListBase(obj)
? obj->getTaggedProto().toObjectOrNull()
: obj->getProto();
if (!pobj)
return;
while (pobj != holder) {
if (pobj->hasUncacheableProto()) {
JS_ASSERT(!pobj->hasSingletonType());
masm.movePtr(ImmGCPtr(pobj), scratchReg);
Address objType(scratchReg, JSObject::offsetOfType());
masm.branchPtr(Assembler::NotEqual, objType, ImmGCPtr(pobj->type()), failures);
}
pobj = pobj->getProto();
}
}
static bool
IsCacheableProtoChain(JSObject *obj, JSObject *holder)
{
while (obj != holder) {
/*
* We cannot assume that we find the holder object on the prototype
* chain and must check for null proto. The prototype chain can be
* altered during the lookupProperty call.
*/
JSObject *proto = IsCacheableListBase(obj)
? obj->getTaggedProto().toObjectOrNull()
: obj->getProto();
if (!proto || !proto->isNative())
return false;
obj = proto;
}
return true;
}
static bool
IsCacheableGetPropReadSlot(JSObject *obj, JSObject *holder, RawShape shape)
{
if (!shape || !IsCacheableProtoChain(obj, holder))
return false;
if (!shape->hasSlot() || !shape->hasDefaultGetter())
return false;
return true;
}
static bool
IsCacheableNoProperty(JSObject *obj, JSObject *holder, RawShape shape, jsbytecode *pc,
const TypedOrValueRegister &output)
{
if (shape)
return false;
JS_ASSERT(!holder);
// Just because we didn't find the property on the object doesn't mean it
// won't magically appear through various engine hacks:
if (obj->getClass()->getProperty && obj->getClass()->getProperty != JS_PropertyStub)
return false;
// Don't generate missing property ICs if we skipped a non-native object, as
// lookups may extend beyond the prototype chain (e.g. for ListBase
// proxies).
JSObject *obj2 = obj;
while (obj2) {
if (!obj2->isNative())
return false;
obj2 = obj2->getProto();
}
// The pc is NULL if the cache is idempotent. We cannot share missing
// properties between caches because TI can only try to prove that a type is
// contained, but does not attempt to check whether something does not exist.
// So the inferred type of the getprop would be missing and would not contain
// undefined, as expected for missing properties.
if (!pc)
return false;
#if JS_HAS_NO_SUCH_METHOD
// The __noSuchMethod__ hook may substitute in a valid method. Since,
// if o.m is missing, o.m() will probably be an error, just mark all
// missing callprops as uncacheable.
if (JSOp(*pc) == JSOP_CALLPROP ||
JSOp(*pc) == JSOP_CALLELEM)
{
return false;
}
#endif
// TI has not yet monitored an Undefined value. The fallback path will
// monitor and invalidate the script.
if (!output.hasValue())
return false;
return true;
}
static bool
IsCacheableGetPropCallNative(JSObject *obj, JSObject *holder, RawShape shape)
{
if (!shape || !IsCacheableProtoChain(obj, holder))
return false;
if (!shape->hasGetterValue() || !shape->getterValue().isObject())
return false;
return shape->getterValue().toObject().isFunction() &&
shape->getterValue().toObject().toFunction()->isNative();
}
static bool
IsCacheableGetPropCallPropertyOp(JSObject *obj, JSObject *holder, RawShape shape)
{
if (!shape || !IsCacheableProtoChain(obj, holder))
return false;
if (shape->hasSlot() || shape->hasGetterValue() || shape->hasDefaultGetter())
return false;
return true;
}
class StubRepatchHelper : public GenerateStubHelper
{
RepatchLabel failures_;
public:
StubRepatchHelper(MacroAssembler &masm)
: GenerateStubHelper(masm)
{ }
// Output values.
CodeOffsetJump exitOffset;
CodeOffsetJump rejoinOffset;
CodeOffsetLabel stubCodePatchOffset;
void branchExit(Assembler::Condition cond, Address addr, ImmGCPtr ptr) {
exitOffset = masm_.branchPtrWithPatch(cond, addr, ptr, &failures_);
}
void jumpRejoin() {
RepatchLabel rejoin_;
rejoinOffset = masm_.jumpWithPatch(&rejoin_);
masm_.bind(&rejoin_);
}
void jumpExit() {
RepatchLabel exit_;
exitOffset = masm_.jumpWithPatch(&exit_);
masm_.bind(&exit_);
}
void bindFailures() {
masm_.bind(&failures_);
}
void pushStubCodePatch(const ImmWord &word) {
stubCodePatchOffset = masm_.PushWithPatch(word);
}
};
void
ReadSlotCache::generateReadSlot(JSContext *cx, MacroAssembler &masm, JSObject *obj,
PropertyName *propName, JSObject *holder, HandleShape shape,
Register object, TypedOrValueRegister output,
GenerateStubHelper &helper, Label *nonRepatchFailures)
{
// If there's a single jump to |failures|, we can patch the shape guard
// jump directly. Otherwise, jump to the end of the stub, so there's a
// common point to patch.
bool multipleFailureJumps = (nonRepatchFailures != NULL) && nonRepatchFailures->used();
helper.branchExit(Assembler::NotEqual,
Address(object, JSObject::offsetOfShape()),
ImmGCPtr(obj->lastProperty()));
bool restoreScratch = false;
Register scratchReg = Register::FromCode(0); // Quell compiler warning.
// If we need a scratch register, use either an output register or the object
// register (and restore it afterwards). After this point, we cannot jump
// directly to |failures| since we may still have to pop the object register.
Label prototypeFailures;
if (obj != holder || !holder->isFixedSlot(shape->slot())) {
if (output.hasValue()) {
scratchReg = output.valueReg().scratchReg();
} else if (output.type() == MIRType_Double) {
scratchReg = object;
masm.push(scratchReg);
restoreScratch = true;
} else {
scratchReg = output.typedReg().gpr();
}
}
// Generate prototype guards.
Register holderReg;
if (obj != holder) {
// Note: this may clobber the object register if it's used as scratch.
GeneratePrototypeGuards(cx, masm, obj, holder, object, scratchReg, &prototypeFailures);
if (holder) {
// Guard on the holder's shape.
holderReg = scratchReg;
masm.movePtr(ImmGCPtr(holder), holderReg);
masm.branchPtr(Assembler::NotEqual,
Address(holderReg, JSObject::offsetOfShape()),
ImmGCPtr(holder->lastProperty()),
&prototypeFailures);
} else {
// The property does not exist. Guard on everything in the
// prototype chain.
JSObject *proto = obj->getTaggedProto().toObjectOrNull();
Register lastReg = object;
JS_ASSERT(scratchReg != object);
while (proto) {
Address addrType(lastReg, JSObject::offsetOfType());
masm.loadPtr(addrType, scratchReg);
Address addrProto(scratchReg, offsetof(types::TypeObject, proto));
masm.loadPtr(addrProto, scratchReg);
Address addrShape(scratchReg, JSObject::offsetOfShape());
// Guard the shape of the current prototype.
masm.branchPtr(Assembler::NotEqual,
addrShape,
ImmGCPtr(proto->lastProperty()),
&prototypeFailures);
proto = proto->getProto();
lastReg = scratchReg;
}
holderReg = InvalidReg;
}
} else {
holderReg = object;
}
// Slot access.
if (holder && holder->isFixedSlot(shape->slot())) {
Address addr(holderReg, JSObject::getFixedSlotOffset(shape->slot()));
masm.loadTypedOrValue(addr, output);
} else if (holder) {
masm.loadPtr(Address(holderReg, JSObject::offsetOfSlots()), scratchReg);
Address addr(scratchReg, holder->dynamicSlotIndex(shape->slot()) * sizeof(Value));
masm.loadTypedOrValue(addr, output);
} else {
JS_ASSERT(!holder);
masm.moveValue(UndefinedValue(), output.valueReg());
}
if (restoreScratch)
masm.pop(scratchReg);
helper.jumpRejoin();
if (obj != holder || multipleFailureJumps) {
masm.bind(&prototypeFailures);
if (restoreScratch)
masm.pop(scratchReg);
helper.bindFailures();
if (multipleFailureJumps)
masm.bind(nonRepatchFailures);
helper.jumpExit();
} else {
helper.bindFailures();
}
}
bool
GetPropertyIC::attachReadSlot(JSContext *cx, IonScript *ion, JSObject *obj, JSObject *holder,
HandleShape shape)
{
MacroAssembler masm(cx);
StubRepatchHelper helper(masm);
generateReadSlot(cx, masm, obj, name(), holder, shape, object(), output(), helper);
const char *attachKind = "non idempotent reading";
if (idempotent())
attachKind = "idempotent reading";
return linkAndAttachStub(cx, masm, ion, attachKind, helper.rejoinOffset, &helper.exitOffset);
}
bool
GetPropertyIC::generateCallGetter(JSContext *cx, MacroAssembler &masm, JSObject *obj,
PropertyName *propName, JSObject *holder, HandleShape shape,
RegisterSet &liveRegs, Register object,
TypedOrValueRegister output, void *returnAddr, jsbytecode *pc,
GenerateStubHelper &helper, Label *nonRepatchFailures)
{
// Initial shape check.
Label stubFailure;
masm.branchPtr(Assembler::NotEqual, Address(object, JSObject::offsetOfShape()),
ImmGCPtr(obj->lastProperty()), &stubFailure);
// If this is a stub for a ListBase object, guard the following:
// 1. The object is a ListBase.
// 2. The object does not have expando properties, or has an expando
// which is known to not have the desired property.
if (IsCacheableListBase(obj)) {
Address handlerAddr(object, JSObject::getFixedSlotOffset(JSSLOT_PROXY_HANDLER));
Address expandoAddr(object, JSObject::getFixedSlotOffset(GetListBaseExpandoSlot()));
// Check that object is a ListBase.
masm.branchPrivatePtr(Assembler::NotEqual, handlerAddr, ImmWord(GetProxyHandler(obj)), &stubFailure);
// For the remaining code, we need to reserve some registers to load a value.
// This is ugly, but unavoidable.
RegisterSet listBaseRegSet(RegisterSet::All());
listBaseRegSet.take(AnyRegister(object));
ValueOperand tempVal = listBaseRegSet.takeValueOperand();
masm.pushValue(tempVal);
Label failListBaseCheck;
Label listBaseOk;
Value expandoVal = obj->getFixedSlot(GetListBaseExpandoSlot());
JSObject *expando = expandoVal.isObject() ? &(expandoVal.toObject()) : NULL;
JS_ASSERT_IF(expando, expando->isNative() && expando->getProto() == NULL);
masm.loadValue(expandoAddr, tempVal);
if (expando && expando->nativeLookup(cx, propName)) {
// Reference object has an expando that doesn't define the name.
// Check incoming object's expando and make sure it's an object.
// The registers backing tempVal were saved on the stack above and are
// popped again on both exit paths.
masm.branchTestObject(Assembler::NotEqual, tempVal, &failListBaseCheck);
masm.extractObject(tempVal, tempVal.scratchReg());
masm.branchPtr(Assembler::Equal,
Address(tempVal.scratchReg(), JSObject::offsetOfShape()),
ImmGCPtr(expando->lastProperty()),
&listBaseOk);
} else {
// Reference object has no expando. Check incoming object and ensure
// it has no expando.
masm.branchTestUndefined(Assembler::Equal, tempVal, &listBaseOk);
}
// Failure case: restore the tempVal registers and jump to failures.
masm.bind(&failListBaseCheck);
masm.popValue(tempVal);
masm.jump(&stubFailure);
// Success case: restore the tempVal and proceed.
masm.bind(&listBaseOk);
masm.popValue(tempVal);
}
// Reserve scratch register for prototype guards.
bool restoreScratch = false;
Register scratchReg = Register::FromCode(0); // Quell compiler warning.
// If we need a scratch register, use either an output register or the object
// register (and restore it afterwards). After this point, we cannot jump
// directly to |stubFailure| since we may still have to pop the object register.
Label prototypeFailures;
JS_ASSERT(output.hasValue());
scratchReg = output.valueReg().scratchReg();
// Note: this may clobber the object register if it's used as scratch.
if (obj != holder)
GeneratePrototypeGuards(cx, masm, obj, holder, object, scratchReg, &prototypeFailures);
// Guard on the holder's shape.
Register holderReg = scratchReg;
masm.movePtr(ImmGCPtr(holder), holderReg);
masm.branchPtr(Assembler::NotEqual,
Address(holderReg, JSObject::offsetOfShape()),
ImmGCPtr(holder->lastProperty()),
&prototypeFailures);
if (restoreScratch)
masm.pop(scratchReg);
// Now we're good to go to invoke the native call.
// saveLive()
masm.PushRegsInMask(liveRegs);
// Remaining registers should basically be free, but we need to use |object| still
// so leave it alone.
RegisterSet regSet(RegisterSet::All());
regSet.take(AnyRegister(object));
// This is a slower stub path, and we're going to be doing a call anyway. We
// don't need to try so hard to avoid using the stack. Scratch regs are just
// taken from the register set, not including the input; their current values
// were saved on the stack above and are restored when we're done with them.
scratchReg = regSet.takeGeneral();
Register argJSContextReg = regSet.takeGeneral();
Register argUintNReg = regSet.takeGeneral();
Register argVpReg = regSet.takeGeneral();
// Shape has a getter function.
bool callNative = IsCacheableGetPropCallNative(obj, holder, shape);
JS_ASSERT_IF(!callNative, IsCacheableGetPropCallPropertyOp(obj, holder, shape));
// TODO: ensure stack is aligned?
DebugOnly<uint32_t> initialStack = masm.framePushed();
Label success, exception;
// Push the IonCode pointer for the stub we're generating.
// WARNING:
// WARNING: If IonCode ever becomes relocatable, the following code is incorrect.
// WARNING: Note that we're not marking the pointer being pushed as an ImmGCPtr.
// WARNING: This is not a marking issue since the stub IonCode won't be collected
// WARNING: between the time it's called and when we get here, but it would fail
// WARNING: if the IonCode object ever moved, since we'd be rooting a nonsense
// WARNING: value here.
// WARNING:
helper.pushStubCodePatch(STUB_ADDR);
if (callNative) {
JS_ASSERT(shape->hasGetterValue() && shape->getterValue().isObject() &&
shape->getterValue().toObject().isFunction());
JSFunction *target = shape->getterValue().toObject().toFunction();
JS_ASSERT(target);
JS_ASSERT(target->isNative());
// Native functions have the signature:
// bool (*)(JSContext *, unsigned, Value *vp)
// Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
// are the function arguments.
// Construct vp array:
// Push object value for |this|
masm.Push(TypedOrValueRegister(MIRType_Object, AnyRegister(object)));
// Push callee/outparam.
masm.Push(ObjectValue(*target));
// Preload arguments into registers.
masm.loadJSContext(argJSContextReg);
masm.move32(Imm32(0), argUintNReg);
masm.movePtr(StackPointer, argVpReg);
if (!masm.buildOOLFakeExitFrame(returnAddr))
return false;
masm.enterFakeExitFrame(ION_FRAME_OOL_NATIVE_GETTER);
// Construct and execute call.
masm.setupUnalignedABICall(3, scratchReg);
masm.passABIArg(argJSContextReg);
masm.passABIArg(argUintNReg);
masm.passABIArg(argVpReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target->native()));
// Test for failure.
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &exception);
// Load the outparam vp[0] into output register(s).
masm.loadValue(
Address(StackPointer, IonOOLNativeGetterExitFrameLayout::offsetOfResult()),
JSReturnOperand);
} else {
Register argObjReg = argUintNReg;
Register argIdReg = regSet.takeGeneral();
PropertyOp target = shape->getterOp();
JS_ASSERT(target);
// JSPropertyOp: JSBool fn(JSContext *cx, JSHandleObject obj, JSHandleId id, JSMutableHandleValue vp)
// Push args on stack first so we can take pointers to make handles.
masm.Push(UndefinedValue());
masm.movePtr(StackPointer, argVpReg);
// Push the canonical jsid from the shape instead of the PropertyName.
RootedId propId(cx);
if (!shape->getUserId(cx, &propId))
return false;
masm.Push(propId, scratchReg);
masm.movePtr(StackPointer, argIdReg);
masm.Push(object);
masm.movePtr(StackPointer, argObjReg);
masm.loadJSContext(argJSContextReg);
if (!masm.buildOOLFakeExitFrame(returnAddr))
return false;
masm.enterFakeExitFrame(ION_FRAME_OOL_PROPERTY_OP);
// Make the call.
masm.setupUnalignedABICall(4, scratchReg);
masm.passABIArg(argJSContextReg);
masm.passABIArg(argObjReg);
masm.passABIArg(argIdReg);
masm.passABIArg(argVpReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target));
// Test for failure.
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &exception);
// Load the outparam vp[0] into output register(s).
masm.loadValue(
Address(StackPointer, IonOOLPropertyOpExitFrameLayout::offsetOfResult()),
JSReturnOperand);
}
// If generating getter call stubs, then return type MUST have been generalized
// to MIRType_Value.
masm.jump(&success);
// Handle exception case.
masm.bind(&exception);
masm.handleException();
// Handle success case.
masm.bind(&success);
masm.storeCallResultValue(output);
// The next instruction removes the footer of the exit frame, so there is no
// need for leaveFakeExitFrame.
// Move the StackPointer back to its original location, unwinding the native exit frame.
if (callNative)
masm.adjustStack(IonOOLNativeGetterExitFrameLayout::Size());
else
masm.adjustStack(IonOOLPropertyOpExitFrameLayout::Size());
JS_ASSERT(masm.framePushed() == initialStack);
// restoreLive()
masm.PopRegsInMask(liveRegs);
// Rejoin jump.
helper.jumpRejoin();
// Exit jump.
masm.bind(&prototypeFailures);
if (restoreScratch)
masm.pop(scratchReg);
masm.bind(&stubFailure);
if (nonRepatchFailures)
masm.bind(nonRepatchFailures);
helper.jumpExit();
return true;
}
bool
GetPropertyIC::attachCallGetter(JSContext *cx, IonScript *ion, JSObject *obj,
JSObject *holder, HandleShape shape,
const SafepointIndex *safepointIndex, void *returnAddr)
{
MacroAssembler masm(cx);
JS_ASSERT(!idempotent());
JS_ASSERT(allowGetters());
// Need to set correct framePushed on the masm so that exit frame descriptors are
// properly constructed.
masm.setFramePushed(ion->frameSize());
StubRepatchHelper helper(masm);
if (!generateCallGetter(cx, masm, obj, name(), holder, shape, liveRegs_,
object(), output(), returnAddr, pc, helper))
{
return false;
}
const char *attachKind = "non idempotent calling";
if (idempotent())
attachKind = "idempotent calling";
return linkAndAttachStub(cx, masm, ion, attachKind, helper.rejoinOffset, &helper.exitOffset,
&helper.stubCodePatchOffset);
}
bool
GetPropertyIC::attachArrayLength(JSContext *cx, IonScript *ion, JSObject *obj)
{
JS_ASSERT(obj->isArray());
JS_ASSERT(!idempotent());
Label failures;
MacroAssembler masm(cx);
// Guard object is a dense array.
RootedObject globalObj(cx, &script->global());
RootedShape shape(cx, obj->lastProperty());
if (!shape)
return false;
masm.branchTestObjShape(Assembler::NotEqual, object(), shape, &failures);
// Load length.
Register outReg;
if (output().hasValue()) {
outReg = output().valueReg().scratchReg();
} else {
JS_ASSERT(output().type() == MIRType_Int32);
outReg = output().typedReg().gpr();
}
masm.loadPtr(Address(object(), JSObject::offsetOfElements()), outReg);
masm.load32(Address(outReg, ObjectElements::offsetOfLength()), outReg);
// The length is an unsigned int, but the value encodes a signed int.
JS_ASSERT(object() != outReg);
masm.branchTest32(Assembler::Signed, outReg, outReg, &failures);
if (output().hasValue())
masm.tagValue(JSVAL_TYPE_INT32, outReg, output().valueReg());
/* Success. */
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
/* Failure. */
masm.bind(&failures);
RepatchLabel exit_;
CodeOffsetJump exitOffset = masm.jumpWithPatch(&exit_);
masm.bind(&exit_);
JS_ASSERT(!hasArrayLengthStub_);
hasArrayLengthStub_ = true;
return linkAndAttachStub(cx, masm, ion, "array length", rejoinOffset, &exitOffset);
}
bool
GetPropertyIC::attachTypedArrayLength(JSContext *cx, IonScript *ion, JSObject *obj)
{
JS_ASSERT(obj->isTypedArray());
JS_ASSERT(!idempotent());
Label failures;
MacroAssembler masm(cx);
Register tmpReg;
if (output().hasValue()) {
tmpReg = output().valueReg().scratchReg();
} else {
JS_ASSERT(output().type() == MIRType_Int32);
tmpReg = output().typedReg().gpr();
}
JS_ASSERT(object() != tmpReg);
// Implement the negated version of the JSObject::isTypedArray predicate.
masm.loadObjClass(object(), tmpReg);
masm.branchPtr(Assembler::Below, tmpReg, ImmWord(&TypedArray::classes[0]), &failures);
masm.branchPtr(Assembler::AboveOrEqual, tmpReg, ImmWord(&TypedArray::classes[TypedArray::TYPE_MAX]), &failures);
// Load length.
masm.loadTypedOrValue(Address(object(), TypedArray::lengthOffset()), output());
/* Success. */
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
/* Failure. */
masm.bind(&failures);
RepatchLabel exit_;
CodeOffsetJump exitOffset = masm.jumpWithPatch(&exit_);
masm.bind(&exit_);
JS_ASSERT(!hasTypedArrayLengthStub_);
hasTypedArrayLengthStub_ = true;
return linkAndAttachStub(cx, masm, ion, "typed array length", rejoinOffset, &exitOffset);
}
static bool
TryAttachNativeGetPropStub(JSContext *cx, IonScript *ion,
GetPropertyIC &cache, HandleObject obj,
HandlePropertyName name,
const SafepointIndex *safepointIndex,
void *returnAddr, bool *isCacheable)
{
JS_ASSERT(!*isCacheable);
RootedObject checkObj(cx, obj);
bool isListBase = IsCacheableListBase(obj);
if (isListBase)
checkObj = obj->getTaggedProto().toObjectOrNull();
if (!checkObj || !checkObj->isNative())
return true;
// If the cache is idempotent, watch out for resolve hooks or non-native
// objects on the proto chain. We check this before calling lookupProperty,
// to make sure no effectful lookup hooks or resolve hooks are called.
if (cache.idempotent() && !checkObj->hasIdempotentProtoChain())
return true;
RootedShape shape(cx);
RootedObject holder(cx);
if (!JSObject::lookupProperty(cx, checkObj, name, &holder, &shape))
return false;
// Check what kind of cache stub we can emit: either a slot read,
// or a getter call.
bool readSlot = false;
bool callGetter = false;
RootedScript script(cx);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);
if (IsCacheableGetPropReadSlot(obj, holder, shape) ||
IsCacheableNoProperty(obj, holder, shape, pc, cache.output()))
{
// With Proxies, we cannot guarantee any property access, as the proxy can
// mask any property on the prototype chain.
if (!obj->isProxy())
readSlot = true;
} else if (IsCacheableGetPropCallNative(checkObj, holder, shape) ||
IsCacheableGetPropCallPropertyOp(checkObj, holder, shape))
{
// Don't enable getter calls if the cache is idempotent, since
// getters can be effectful.
if (!cache.idempotent() && cache.allowGetters())
callGetter = true;
}
// Only continue if one of the cache methods is viable.
if (!readSlot && !callGetter)
return true;
// TI infers the possible types of native object properties. There's one
// edge case though: for singleton objects it does not add the initial
// "undefined" type, see the propertySet comment in jsinfer.h. We can't
// monitor the return type inside an idempotent cache though, so we don't
// handle this case.
if (cache.idempotent() &&
holder &&
holder->hasSingletonType() &&
holder->getSlot(shape->slot()).isUndefined())
{
return true;
}
*isCacheable = true;
// readSlot and callGetter are mutually exclusive
JS_ASSERT_IF(readSlot, !callGetter);
JS_ASSERT_IF(callGetter, !readSlot);
// Fall back to the interpreter function.
if (!cache.canAttachStub())
return true;
if (readSlot)
return cache.attachReadSlot(cx, ion, obj, holder, shape);
else if (obj->isArray() && !cache.hasArrayLengthStub() && cx->names().length == name)
return cache.attachArrayLength(cx, ion, obj);
return cache.attachCallGetter(cx, ion, obj, holder, shape, safepointIndex, returnAddr);
}
bool
GetPropertyIC::update(JSContext *cx, size_t cacheIndex,
HandleObject obj, MutableHandleValue vp)
{
AutoFlushCache afc ("GetPropertyCache");
const SafepointIndex *safepointIndex;
void *returnAddr;
RootedScript topScript(cx, GetTopIonJSScript(cx, &safepointIndex, &returnAddr));
IonScript *ion = topScript->ionScript();
GetPropertyIC &cache = ion->getCache(cacheIndex).toGetProperty();
RootedPropertyName name(cx, cache.name());
// Override the return value if we are invalidated (bug 728188).
AutoDetectInvalidation adi(cx, vp.address(), ion);
// If the cache is idempotent, we will redo the op in the interpreter.
if (cache.idempotent())
adi.disable();
// For now, just stop generating new stubs once we hit the stub count
// limit. Once we can make calls from within generated stubs, a new call
// stub will be generated instead and the previous stubs unlinked.
bool isCacheable = false;
if (!TryAttachNativeGetPropStub(cx, ion, cache, obj, name,
safepointIndex, returnAddr,
&isCacheable))
{
return false;
}
if (!isCacheable && cache.canAttachStub() &&
!cache.idempotent() && cx->names().length == name)
{
if (cache.output().type() != MIRType_Value && cache.output().type() != MIRType_Int32) {
// The next execution should cause an invalidation because the type
// does not fit.
isCacheable = false;
} else if (obj->isTypedArray() && !cache.hasTypedArrayLengthStub()) {
isCacheable = true;
if (!cache.attachTypedArrayLength(cx, ion, obj))
return false;
}
}
if (cache.idempotent() && !isCacheable) {
// Invalidate the cache if the property was not found, or was found on
// a non-native object. This ensures:
// 1) The property read has no observable side-effects.
// 2) There's no need to dynamically monitor the return type. This would
// be complicated since (due to GVN) there can be multiple pc's
// associated with a single idempotent cache.
IonSpew(IonSpew_InlineCaches, "Invalidating from idempotent cache %s:%d",
topScript->filename, topScript->lineno);
topScript->invalidatedIdempotentCache = true;
// Do not re-invalidate if the lookup already caused invalidation.
if (!topScript->hasIonScript())
return true;
return Invalidate(cx, topScript);
}
RootedId id(cx, NameToId(name));
if (obj->getOps()->getProperty) {
if (!JSObject::getGeneric(cx, obj, obj, id, vp))
return false;
} else {
if (!GetPropertyHelper(cx, obj, id, 0, vp))
return false;
}
if (!cache.idempotent()) {
RootedScript script(cx);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);
// If the cache is idempotent, the property exists so we don't have to
// call __noSuchMethod__.
#if JS_HAS_NO_SUCH_METHOD
// Handle objects with __noSuchMethod__.
if (JSOp(*pc) == JSOP_CALLPROP && JS_UNLIKELY(vp.isPrimitive())) {
if (!OnUnknownMethod(cx, obj, IdToValue(id), vp))
return false;
}
#endif
// Monitor changes to cache entry.
types::TypeScript::Monitor(cx, script, pc, vp);
}
return true;
}
void
IonCache::updateBaseAddress(IonCode *code, MacroAssembler &masm)
{
initialJump_.repoint(code, &masm);
lastJump_.repoint(code, &masm);
fallbackLabel_.repoint(code, &masm);
}
void
IonCache::disable()
{
reset();
this->disabled_ = 1;
}
void
IonCache::reset()
{
// Skip all generated stubs by patching the initial jump to go directly to
// the update function.
PatchJump(initialJump_, fallbackLabel_);
this->stubCount_ = 0;
this->lastJump_ = initialJump_;
}
bool
SetPropertyIC::attachNativeExisting(JSContext *cx, IonScript *ion,
HandleObject obj, HandleShape shape)
{
MacroAssembler masm(cx);
RepatchLabel exit_;
CodeOffsetJump exitOffset =
masm.branchPtrWithPatch(Assembler::NotEqual,
Address(object(), JSObject::offsetOfShape()),
ImmGCPtr(obj->lastProperty()),
&exit_);
masm.bind(&exit_);
if (obj->isFixedSlot(shape->slot())) {
Address addr(object(), JSObject::getFixedSlotOffset(shape->slot()));
if (cx->zone()->needsBarrier())
masm.callPreBarrier(addr, MIRType_Value);
masm.storeConstantOrRegister(value(), addr);
} else {
Register slotsReg = object();
masm.loadPtr(Address(object(), JSObject::offsetOfSlots()), slotsReg);
Address addr(slotsReg, obj->dynamicSlotIndex(shape->slot()) * sizeof(Value));
if (cx->zone()->needsBarrier())
masm.callPreBarrier(addr, MIRType_Value);
masm.storeConstantOrRegister(value(), addr);
}
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
return linkAndAttachStub(cx, masm, ion, "setting", rejoinOffset, &exitOffset);
}
bool
SetPropertyIC::attachSetterCall(JSContext *cx, IonScript *ion,
HandleObject obj, HandleObject holder, HandleShape shape,
void *returnAddr)
{
MacroAssembler masm(cx);
// Need to set correct framePushed on the masm so that exit frame descriptors are
// properly constructed.
masm.setFramePushed(ion->frameSize());
Label failure;
masm.branchPtr(Assembler::NotEqual,
Address(object(), JSObject::offsetOfShape()),
ImmGCPtr(obj->lastProperty()),
&failure);
// Generate prototype guards if needed.
// Take a scratch register for use, save on stack.
{
RegisterSet regSet(RegisterSet::All());
regSet.take(AnyRegister(object()));
if (!value().constant())
regSet.maybeTake(value().reg());
Register scratchReg = regSet.takeGeneral();
masm.push(scratchReg);
Label protoFailure;
Label protoSuccess;
// Generate prototype/shape guards.
if (obj != holder)
GeneratePrototypeGuards(cx, masm, obj, holder, object(), scratchReg, &protoFailure);
masm.movePtr(ImmGCPtr(holder), scratchReg);
masm.branchPtr(Assembler::NotEqual,
Address(scratchReg, JSObject::offsetOfShape()),
ImmGCPtr(holder->lastProperty()),
&protoFailure);
masm.jump(&protoSuccess);
masm.bind(&protoFailure);
masm.pop(scratchReg);
masm.jump(&failure);
masm.bind(&protoSuccess);
masm.pop(scratchReg);
}
// Good to go for invoking setter.
// saveLive()
masm.PushRegsInMask(liveRegs_);
// Remaining registers should basically be free, but we need to use |object| still
// so leave it alone.
RegisterSet regSet(RegisterSet::All());
regSet.take(AnyRegister(object()));
// This is a slower stub path, and we're going to be doing a call anyway. We
// don't need to try so hard to avoid using the stack. Scratch regs are just
// taken from the register set, not including the input; their current values
// were saved on the stack above and are restored when we're done with them.
Register scratchReg = regSet.takeGeneral();
Register argJSContextReg = regSet.takeGeneral();
Register argObjReg = regSet.takeGeneral();
Register argIdReg = regSet.takeGeneral();
Register argStrictReg = regSet.takeGeneral();
Register argVpReg = regSet.takeGeneral();
// Ensure stack is aligned.
DebugOnly<uint32_t> initialStack = masm.framePushed();
Label success, exception;
// Push the IonCode pointer for the stub we're generating.
// WARNING:
// WARNING: If IonCode ever becomes relocatable, the following code is incorrect.
// WARNING: Note that we're not marking the pointer being pushed as an ImmGCPtr.
// WARNING: This is not a marking issue since the stub IonCode won't be collected
// WARNING: between the time it's called and when we get here, but it would fail
// WARNING: if the IonCode object ever moved, since we'd be rooting a nonsense
// WARNING: value here.
// WARNING:
CodeOffsetLabel stubCodePatchOffset = masm.PushWithPatch(STUB_ADDR);
StrictPropertyOp target = shape->setterOp();
JS_ASSERT(target);
// JSStrictPropertyOp: JSBool fn(JSContext *cx, JSHandleObject obj,
// JSHandleId id, JSBool strict, JSMutableHandleValue vp);
// Push args on stack first so we can take pointers to make handles.
if (value().constant())
masm.Push(value().value());
else
masm.Push(value().reg());
masm.movePtr(StackPointer, argVpReg);
masm.move32(Imm32(strict() ? 1 : 0), argStrictReg);
// Push the canonical jsid from the shape instead of the PropertyName.
RootedId propId(cx);
if (!shape->getUserId(cx, &propId))
return false;
masm.Push(propId, argIdReg);
masm.movePtr(StackPointer, argIdReg);
masm.Push(object());
masm.movePtr(StackPointer, argObjReg);
masm.loadJSContext(argJSContextReg);
if (!masm.buildOOLFakeExitFrame(returnAddr))
return false;
masm.enterFakeExitFrame(ION_FRAME_OOL_PROPERTY_OP);
// Make the call.
masm.setupUnalignedABICall(5, scratchReg);
masm.passABIArg(argJSContextReg);
masm.passABIArg(argObjReg);
masm.passABIArg(argIdReg);
masm.passABIArg(argStrictReg);
masm.passABIArg(argVpReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target));
// Test for failure.
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &exception);
masm.jump(&success);
// Handle exception case.
masm.bind(&exception);
masm.handleException();
// Handle success case.
masm.bind(&success);
// The next instruction removes the footer of the exit frame, so there is no
// need for leaveFakeExitFrame.
// Move the StackPointer back to its original location, unwinding the exit frame.
masm.adjustStack(IonOOLPropertyOpExitFrameLayout::Size());
JS_ASSERT(masm.framePushed() == initialStack);
// restoreLive()
masm.PopRegsInMask(liveRegs_);
// Rejoin jump.
RepatchLabel rejoin;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin);
masm.bind(&rejoin);
// Exit jump.
masm.bind(&failure);
RepatchLabel exit;
CodeOffsetJump exitOffset = masm.jumpWithPatch(&exit);
masm.bind(&exit);
return linkAndAttachStub(cx, masm, ion, "calling", rejoinOffset, &exitOffset,
&stubCodePatchOffset);
}
bool
SetPropertyIC::attachNativeAdding(JSContext *cx, IonScript *ion, JSObject *obj,
HandleShape oldShape, HandleShape newShape,
HandleShape propShape)
{
MacroAssembler masm(cx);
Label failures;
/* Guard the type of the object */
masm.branchPtr(Assembler::NotEqual, Address(object(), JSObject::offsetOfType()),
ImmGCPtr(obj->type()), &failures);
/* Guard shapes along prototype chain. */
masm.branchTestObjShape(Assembler::NotEqual, object(), oldShape, &failures);
Label protoFailures;
masm.push(object()); // save object reg because we clobber it
JSObject *proto = obj->getProto();
Register protoReg = object();
while (proto) {
RawShape protoShape = proto->lastProperty();
// load next prototype
masm.loadPtr(Address(protoReg, JSObject::offsetOfType()), protoReg);
masm.loadPtr(Address(protoReg, offsetof(types::TypeObject, proto)), protoReg);
// ensure that the prototype is not NULL and that its shape matches
masm.branchTestPtr(Assembler::Zero, protoReg, protoReg, &protoFailures);
masm.branchTestObjShape(Assembler::NotEqual, protoReg, protoShape, &protoFailures);
proto = proto->getProto();
}
masm.pop(object()); // restore object reg
/* Changing object shape. Write the object's new shape. */
Address shapeAddr(object(), JSObject::offsetOfShape());
if (cx->zone()->needsBarrier())
masm.callPreBarrier(shapeAddr, MIRType_Shape);
masm.storePtr(ImmGCPtr(newShape), shapeAddr);
/* Set the value on the object. */
if (obj->isFixedSlot(propShape->slot())) {
Address addr(object(), JSObject::getFixedSlotOffset(propShape->slot()));
masm.storeConstantOrRegister(value(), addr);
} else {
Register slotsReg = object();
masm.loadPtr(Address(object(), JSObject::offsetOfSlots()), slotsReg);
Address addr(slotsReg, obj->dynamicSlotIndex(propShape->slot()) * sizeof(Value));
masm.storeConstantOrRegister(value(), addr);
}
/* Success. */
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
/* Failure. */
masm.bind(&protoFailures);
masm.pop(object());
masm.bind(&failures);
RepatchLabel exit_;
CodeOffsetJump exitOffset = masm.jumpWithPatch(&exit_);
masm.bind(&exit_);
return linkAndAttachStub(cx, masm, ion, "adding", rejoinOffset, &exitOffset);
}
static bool
IsPropertyInlineable(JSObject *obj)
{
if (!obj->isNative())
return false;
if (obj->watched())
return false;
return true;
}
static bool
IsPropertySetInlineable(JSContext *cx, HandleObject obj, HandleId id, MutableHandleShape pshape)
{
RawShape shape = obj->nativeLookup(cx, id);
if (!shape)
return false;
if (!shape->hasSlot())
return false;
if (!shape->hasDefaultSetter())
return false;
if (!shape->writable())
return false;
pshape.set(shape);
return true;
}
static bool
IsPropertySetterCallInlineable(JSContext *cx, HandleObject obj, HandleObject holder,
HandleShape shape)
{
if (!shape)
return false;
if (!holder->isNative())
return false;
if (shape->hasSlot())
return false;
if (shape->hasDefaultSetter())
return false;
if (!shape->writable())
return false;
// We only handle propertyOps for now, so fail if we have SetterValue
// (which implies a JSNative setter).
if (shape->hasSetterValue())
return false;
return true;
}
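// Taken together, the predicates above split existing-property sets into the
// two attachable cases used by SetPropertyIC::update below: a plain writable
// data slot (attachNativeExisting) versus a writable PropertyOp setter with
// no slot (attachSetterCall).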
static bool
IsPropertyAddInlineable(JSContext *cx, HandleObject obj, HandleId id, uint32_t oldSlots,
MutableHandleShape pShape)
{
// This is not an add; the property already exists.
if (pShape.get())
return false;
RootedShape shape(cx, obj->nativeLookup(cx, id));
if (!shape || shape->inDictionary() || !shape->hasSlot() || !shape->hasDefaultSetter())
return false;
// If the object has a non-default resolve hook, don't inline.
if (obj->getClass()->resolve != JS_ResolveStub)
return false;
if (!obj->isExtensible() || !shape->writable())
return false;
// Walk up the object's prototype chain and ensure that all prototypes are
// native and that none of them defines a getter or setter on the property.
for (JSObject *proto = obj->getProto(); proto; proto = proto->getProto()) {
// If the prototype is non-native, don't optimize.
if (!proto->isNative())
return false;
// If the prototype defines this property in a non-plain way, don't optimize.
RawShape protoShape = proto->nativeLookup(cx, id);
if (protoShape && !protoShape->hasDefaultSetter())
return false;
// Otherwise, if there's no such property, watch out for a resolve hook
// that would need to be invoked and would thus prevent inlining the
// property addition.
if (proto->getClass()->resolve != JS_ResolveStub)
return false;
}
// Only add an IC entry if the dynamic slots didn't change when the shapes
// changed. We need to ensure that a shape change for a subsequent object
// won't involve reallocating the slot array.
if (obj->numDynamicSlots() != oldSlots)
return false;
pShape.set(shape);
return true;
}
bool
SetPropertyIC::update(JSContext *cx, size_t cacheIndex, HandleObject obj,
HandleValue value)
{
AutoFlushCache afc ("SetPropertyCache");
void *returnAddr;
const SafepointIndex *safepointIndex;
RootedScript script(cx, GetTopIonJSScript(cx, &safepointIndex, &returnAddr));
IonScript *ion = script->ion;
SetPropertyIC &cache = ion->getCache(cacheIndex).toSetProperty();
RootedPropertyName name(cx, cache.name());
RootedId id(cx, AtomToId(name));
RootedShape shape(cx);
RootedObject holder(cx);
// Stop generating new stubs once we hit the stub count limit; see
// GetPropertyCache.
bool inlinable = cache.canAttachStub() && IsPropertyInlineable(obj);
bool addedSetterStub = false;
if (inlinable) {
if (IsPropertySetInlineable(cx, obj, id, &shape)) {
if (!cache.attachNativeExisting(cx, ion, obj, shape))
return false;
addedSetterStub = true;
} else {
if (!JSObject::lookupProperty(cx, obj, name, &holder, &shape))
return false;
if (IsPropertySetterCallInlineable(cx, obj, holder, shape)) {
if (!cache.attachSetterCall(cx, ion, obj, holder, shape, returnAddr))
return false;
addedSetterStub = true;
}
}
}
uint32_t oldSlots = obj->numDynamicSlots();
RootedShape oldShape(cx, obj->lastProperty());
// Set/Add the property on the object; the inline caches are set up for the
// next execution.
if (!SetProperty(cx, obj, name, value, cache.strict(), cache.isSetName()))
return false;
// If the property did not exist before, we can now try to inline the
// property addition.
if (inlinable && !addedSetterStub && obj->lastProperty() != oldShape &&
IsPropertyAddInlineable(cx, obj, id, oldSlots, &shape))
{
RootedShape newShape(cx, obj->lastProperty());
if (!cache.attachNativeAdding(cx, ion, obj, oldShape, newShape, shape))
return false;
}
return true;
}
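// Worked example (illustrative): for a hot site |o.x = v|, the first miss
// calls update(). If x is already a plain writable data property, an
// "existing" stub is attached; if x is implemented by a PropertyOp setter
// found on the proto chain, a "calling" stub; and if the SetProperty call
// above added x and changed o's shape, an "adding" stub catches the next
// object of the same type taking the same shape transition.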
bool
GetElementIC::attachGetProp(JSContext *cx, IonScript *ion, HandleObject obj,
const Value &idval, HandlePropertyName name)
{
JS_ASSERT(index().reg().hasValue());
RootedObject holder(cx);
RootedShape shape(cx);
if (!JSObject::lookupProperty(cx, obj, name, &holder, &shape))
return false;
RootedScript script(cx);
jsbytecode *pc;
getScriptedLocation(&script, &pc);
if (!IsCacheableGetPropReadSlot(obj, holder, shape) &&
!IsCacheableNoProperty(obj, holder, shape, pc, output())) {
IonSpew(IonSpew_InlineCaches, "GETELEM uncacheable property");
return true;
}
JS_ASSERT(idval.isString());
Label nonRepatchFailures;
MacroAssembler masm(cx);
// Guard on the index value.
ValueOperand val = index().reg().valueReg();
masm.branchTestValue(Assembler::NotEqual, val, idval, &nonRepatchFailures);
StubRepatchHelper helper(masm);
generateReadSlot(cx, masm, obj, name, holder, shape, object(), output(), helper,
&nonRepatchFailures);
return linkAndAttachStub(cx, masm, ion, "property", helper.rejoinOffset, &helper.exitOffset);
}
bool
GetElementIC::attachDenseElement(JSContext *cx, IonScript *ion, JSObject *obj, const Value &idval)
{
JS_ASSERT(obj->isNative());
JS_ASSERT(idval.isInt32());
Label failures;
MacroAssembler masm(cx);
Register scratchReg = output().scratchReg().gpr();
JS_ASSERT(scratchReg != InvalidReg);
// Guard object's shape.
RootedShape shape(cx, obj->lastProperty());
if (!shape)
return false;
masm.branchTestObjShape(Assembler::NotEqual, object(), shape, &failures);
// Ensure the index is an int32 value.
Register indexReg = InvalidReg;
if (index().reg().hasValue()) {
indexReg = scratchReg;
ValueOperand val = index().reg().valueReg();
masm.branchTestInt32(Assembler::NotEqual, val, &failures);
// Unbox the index.
masm.unboxInt32(val, indexReg);
} else {
JS_ASSERT(!index().reg().typedReg().isFloat());
indexReg = index().reg().typedReg().gpr();
}
// Load elements vector.
masm.push(object());
masm.loadPtr(Address(object(), JSObject::offsetOfElements()), object());
Label hole;
// Guard on the initialized length.
Address initLength(object(), ObjectElements::offsetOfInitializedLength());
masm.branch32(Assembler::BelowOrEqual, initLength, indexReg, &hole);
// Check for holes & load the value.
masm.loadElementTypedOrValue(BaseIndex(object(), indexReg, TimesEight),
output(), true, &hole);
masm.pop(object());
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
// All failures flow to here.
masm.bind(&hole);
masm.pop(object());
masm.bind(&failures);
RepatchLabel exit_;
CodeOffsetJump exitOffset = masm.jumpWithPatch(&exit_);
masm.bind(&exit_);
setHasDenseStub();
return linkAndAttachStub(cx, masm, ion, "dense array", rejoinOffset, &exitOffset);
}
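// VM-level equivalent of the fast path this stub encodes (illustrative
// sketch; the accessor names are assumptions for exposition). Out-of-bounds
// indexes and holes fall back to the next stub or to update():
#if 0
static bool
ExampleDenseGet(JSObject *obj, uint32_t index, Value *vp)
{
    if (index >= obj->getDenseInitializedLength())  // initialized-length guard
        return false;
    *vp = obj->getDenseElement(index);              // load from elements vector
    return !vp->isMagic(JS_ELEMENTS_HOLE);          // hole check
}
#endif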
bool
GetElementIC::attachTypedArrayElement(JSContext *cx, IonScript *ion, JSObject *obj,
const Value &idval)
{
JS_ASSERT(obj->isTypedArray());
JS_ASSERT(idval.isInt32());
Label failures;
MacroAssembler masm(cx);
// The array type is the object's index within the table of typed array classes.
int arrayType = TypedArray::type(obj);
// The output register is not yet specialized as a float register; the only
// way to accept float typed arrays for now is to return a Value type.
bool floatOutput = arrayType == TypedArray::TYPE_FLOAT32 ||
arrayType == TypedArray::TYPE_FLOAT64;
JS_ASSERT_IF(!output().hasValue(), !floatOutput);
Register tmpReg = output().scratchReg().gpr();
JS_ASSERT(tmpReg != InvalidReg);
// Check that the typed array is of the same type as the current object,
// because the load size differs with the typed array's element width.
masm.branchTestObjClass(Assembler::NotEqual, object(), tmpReg, obj->getClass(), &failures);
// Ensure the index is an int32 value.
Register indexReg = tmpReg;
JS_ASSERT(!index().constant());
if (index().reg().hasValue()) {
ValueOperand val = index().reg().valueReg();
masm.branchTestInt32(Assembler::NotEqual, val, &failures);
// Unbox the index.
masm.unboxInt32(val, indexReg);
} else {
JS_ASSERT(!index().reg().typedReg().isFloat());
indexReg = index().reg().typedReg().gpr();
}
// Guard on the initialized length.
Address length(object(), TypedArray::lengthOffset());
masm.branch32(Assembler::BelowOrEqual, length, indexReg, &failures);
// Save the object register on the stack in case of failure.
Label popAndFail;
Register elementReg = object();
masm.push(object());
// Load elements vector.
masm.loadPtr(Address(object(), TypedArray::dataOffset()), elementReg);
// Load the value. We use an invalid register because the destination
// register is necessarily a non-double register.
int width = TypedArray::slotWidth(arrayType);
BaseIndex source(elementReg, indexReg, ScaleFromElemWidth(width));
if (output().hasValue())
masm.loadFromTypedArray(arrayType, source, output().valueReg(), true,
elementReg, &popAndFail);
else
masm.loadFromTypedArray(arrayType, source, output().typedReg(),
elementReg, &popAndFail);
masm.pop(object());
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
// Restore the object before continuing to the next stub.
masm.bind(&popAndFail);
masm.pop(object());
masm.bind(&failures);
RepatchLabel exit_;
CodeOffsetJump exitOffset = masm.jumpWithPatch(&exit_);
masm.bind(&exit_);
return linkAndAttachStub(cx, masm, ion, "typed array", rejoinOffset, &exitOffset);
}
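// The BaseIndex above computes element addresses of the form
// data + index * slotWidth(arrayType). An illustrative scalar version
// (hypothetical helper, for exposition only):
#if 0
static void *
ExampleTypedArrayElemAddr(void *data, int arrayType, uint32_t index)
{
    int width = TypedArray::slotWidth(arrayType);     // 1, 2, 4 or 8 bytes
    return (uint8_t *) data + size_t(index) * width;  // scaled index
}
#endif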
bool
GetElementIC::update(JSContext *cx, size_t cacheIndex, HandleObject obj,
HandleValue idval, MutableHandleValue res)
{
IonScript *ion = GetTopIonJSScript(cx)->ionScript();
GetElementIC &cache = ion->getCache(cacheIndex).toGetElement();
RootedScript script(cx);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);
RootedValue lval(cx, ObjectValue(*obj));
if (cache.isDisabled()) {
if (!GetElementOperation(cx, JSOp(*pc), &lval, idval, res))
return false;
types::TypeScript::Monitor(cx, script, pc, res);
return true;
}
// Override the return value if we are invalidated (bug 728188).
AutoFlushCache afc ("GetElementCache");
AutoDetectInvalidation adi(cx, res.address(), ion);
RootedId id(cx);
if (!FetchElementId(cx, obj, idval, &id, res))
return false;
bool attachedStub = false;
if (cache.canAttachStub()) {
if (obj->isNative() && cache.monitoredResult()) {
uint32_t dummy;
if (idval.isString() && JSID_IS_ATOM(id) && !JSID_TO_ATOM(id)->isIndex(&dummy)) {
RootedPropertyName name(cx, JSID_TO_ATOM(id)->asPropertyName());
if (!cache.attachGetProp(cx, ion, obj, idval, name))
return false;
attachedStub = true;
}
} else if (!cache.hasDenseStub() && obj->isNative() && idval.isInt32()) {
if (!cache.attachDenseElement(cx, ion, obj, idval))
return false;
attachedStub = true;
} else if (obj->isTypedArray() && idval.isInt32()) {
int arrayType = TypedArray::type(obj);
bool floatOutput = arrayType == TypedArray::TYPE_FLOAT32 ||
arrayType == TypedArray::TYPE_FLOAT64;
if (!floatOutput || cache.output().hasValue()) {
if (!cache.attachTypedArrayElement(cx, ion, obj, idval))
return false;
attachedStub = true;
}
}
}
if (!GetElementOperation(cx, JSOp(*pc), &lval, idval, res))
return false;
// If no new stub was attached and we've reached the maximum number of
// stubs, disable the cache.
if (!attachedStub && !cache.canAttachStub())
cache.disable();
types::TypeScript::Monitor(cx, script, pc, res);
return true;
}
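// Dispatch summary (illustrative): string keys on native objects take the
// named-property path when results are monitored; int32 keys on native
// objects take the dense-element path, of which at most one stub is attached
// per cache (hence hasDenseStub()); int32 keys on typed arrays take the
// typed-array path, provided a float result can be boxed as a Value.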
bool
BindNameIC::attachGlobal(JSContext *cx, IonScript *ion, JSObject *scopeChain)
{
JS_ASSERT(scopeChain->isGlobal());
MacroAssembler masm(cx);
// Guard on the scope chain.
RepatchLabel exit_;
CodeOffsetJump exitOffset = masm.branchPtrWithPatch(Assembler::NotEqual, scopeChainReg(),
ImmGCPtr(scopeChain), &exit_);
masm.bind(&exit_);
masm.movePtr(ImmGCPtr(scopeChain), outputReg());
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
return linkAndAttachStub(cx, masm, ion, "global", rejoinOffset, &exitOffset);
}
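// Note that the stub above is the entire fast path: one pointer-identity
// guard on the scope chain, then materializing the (same) global as the
// bound object.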
static inline void
GenerateScopeChainGuard(MacroAssembler &masm, JSObject *scopeObj,
Register scopeObjReg, RawShape shape, Label *failures)
{
if (scopeObj->isCall()) {
// We can skip a guard on the call object if the script's bindings are
// guaranteed to be immutable (and thus cannot introduce shadowing
// variables).
CallObject *callObj = &scopeObj->asCall();
if (!callObj->isForEval()) {
RawFunction fun = &callObj->callee();
RawScript script = fun->nonLazyScript();
if (!script->funHasExtensibleScope)
return;
}
} else if (scopeObj->isGlobal()) {
// If this is the last object on the scope walk, and the property we've
// found is not configurable, then we don't need a shape guard because
// the shape cannot be removed.
if (shape && !shape->configurable())
return;
}
Address shapeAddr(scopeObjReg, JSObject::offsetOfShape());
masm.branchPtr(Assembler::NotEqual, shapeAddr, ImmGCPtr(scopeObj->lastProperty()), failures);
}
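// For example (illustrative): in |function f() { var x; return function() { x = 1; }; }|,
// f's CallObject bindings cannot gain shadowing variables unless f has an
// extensible scope (e.g. it uses direct eval), so the shape guard for that
// hop can be skipped entirely.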
static void
GenerateScopeChainGuards(MacroAssembler &masm, JSObject *scopeChain, JSObject *holder,
Register outputReg, Label *failures)
{
JSObject *tobj = scopeChain;
// Walk up the scope chain. Note that IsCacheableScopeChain guarantees the
// |tobj == holder| condition terminates the loop.
while (true) {
JS_ASSERT(IsCacheableNonGlobalScope(tobj) || tobj->isGlobal());
GenerateScopeChainGuard(masm, tobj, outputReg, NULL, failures);
if (tobj == holder)
break;
// Load the next link.
tobj = &tobj->asScope().enclosingScope();
masm.extractObject(Address(outputReg, ScopeObject::offsetOfEnclosingScope()), outputReg);
}
}
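// The emitted stub thus repeats this walk at runtime: per hop, one shape
// guard (or none, per GenerateScopeChainGuard's skip rules) followed by a
// load of the enclosing scope, stopping once |holder| is reached.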
bool
BindNameIC::attachNonGlobal(JSContext *cx, IonScript *ion, JSObject *scopeChain, JSObject *holder)
{
JS_ASSERT(IsCacheableNonGlobalScope(scopeChain));
MacroAssembler masm(cx);
// Guard on the shape of the scope chain.
RepatchLabel failures;
Label nonRepatchFailures;
CodeOffsetJump exitOffset =
masm.branchPtrWithPatch(Assembler::NotEqual,
Address(scopeChainReg(), JSObject::offsetOfShape()),
ImmGCPtr(scopeChain->lastProperty()),
&failures);
if (holder != scopeChain) {
JSObject *parent = &scopeChain->asScope().enclosingScope();
masm.extractObject(Address(scopeChainReg(), ScopeObject::offsetOfEnclosingScope()), outputReg());
GenerateScopeChainGuards(masm, parent, holder, outputReg(), &nonRepatchFailures);
} else {
masm.movePtr(scopeChainReg(), outputReg());
}
// At this point outputReg holds the object on which the property
// was found, so we're done.
RepatchLabel rejoin_;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin_);
masm.bind(&rejoin_);
// All failures flow to here, so there is a common point to patch.
masm.bind(&failures);
masm.bind(&nonRepatchFailures);
if (holder != scopeChain) {
RepatchLabel exit_;
exitOffset = masm.jumpWithPatch(&exit_);
masm.bind(&exit_);
}
return linkAndAttachStub(cx, masm, ion, "non-global", rejoinOffset, &exitOffset);
}
static bool
IsCacheableScopeChain(JSObject *scopeChain, JSObject *holder)
{
while (true) {
if (!IsCacheableNonGlobalScope(scopeChain)) {
IonSpew(IonSpew_InlineCaches, "Non-cacheable object on scope chain");
return false;
}
if (scopeChain == holder)
return true;
scopeChain = &scopeChain->asScope().enclosingScope();
if (!scopeChain) {
IonSpew(IonSpew_InlineCaches, "Scope chain indirect hit");
return false;
}
}
JS_NOT_REACHED("Shouldn't get here");
return false;
}
JSObject *
BindNameIC::update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain)
{
AutoFlushCache afc ("BindNameCache");
IonScript *ion = GetTopIonJSScript(cx)->ionScript();
BindNameIC &cache = ion->getCache(cacheIndex).toBindName();
HandlePropertyName name = cache.name();
RootedObject holder(cx);
if (scopeChain->isGlobal()) {
holder = scopeChain;
} else {
if (!LookupNameWithGlobalDefault(cx, name, scopeChain, &holder))
return NULL;
}
// Stop generating new stubs once we hit the stub count limit; see
// GetPropertyCache.
if (cache.canAttachStub()) {
if (scopeChain->isGlobal()) {
if (!cache.attachGlobal(cx, ion, scopeChain))
return NULL;
} else if (IsCacheableScopeChain(scopeChain, holder)) {
if (!cache.attachNonGlobal(cx, ion, scopeChain, holder))
return NULL;
} else {
IonSpew(IonSpew_InlineCaches, "BINDNAME uncacheable scope chain");
}
}
return holder;
}
bool
NameIC::attach(JSContext *cx, IonScript *ion, HandleObject scopeChain, HandleObject holder, HandleShape shape)
{
MacroAssembler masm(cx);
Label failures;
Register scratchReg = outputReg().valueReg().scratchReg();
masm.mov(scopeChainReg(), scratchReg);
GenerateScopeChainGuards(masm, scopeChain, holder, scratchReg, &failures);
unsigned slot = shape->slot();
if (holder->isFixedSlot(slot)) {
Address addr(scratchReg, JSObject::getFixedSlotOffset(slot));
masm.loadTypedOrValue(addr, outputReg());
} else {
masm.loadPtr(Address(scratchReg, JSObject::offsetOfSlots()), scratchReg);
Address addr(scratchReg, holder->dynamicSlotIndex(slot) * sizeof(Value));
masm.loadTypedOrValue(addr, outputReg());
}
RepatchLabel rejoin;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin);
masm.bind(&rejoin);
CodeOffsetJump exitOffset;
if (failures.used()) {
masm.bind(&failures);
RepatchLabel exit;
exitOffset = masm.jumpWithPatch(&exit);
masm.bind(&exit);
}
return linkAndAttachStub(cx, masm, ion, "generic", rejoinOffset,
(failures.bound() ? &exitOffset : NULL));
}
static bool
IsCacheableName(JSContext *cx, HandleObject scopeChain, HandleObject obj, HandleObject holder,
HandleShape shape, jsbytecode *pc, const TypedOrValueRegister &output)
{
if (!shape)
return false;
if (!obj->isNative())
return false;
if (obj != holder)
return false;
if (obj->isGlobal()) {
// Support only simple property lookups.
if (!IsCacheableGetPropReadSlot(obj, holder, shape) &&
!IsCacheableNoProperty(obj, holder, shape, pc, output))
return false;
} else if (obj->isCall()) {
if (!shape->hasDefaultGetter())
return false;
} else {
// We don't yet support lookups on Block or DeclEnv objects.
return false;
}
RootedObject obj2(cx, scopeChain);
while (obj2) {
if (!IsCacheableNonGlobalScope(obj2) && !obj2->isGlobal())
return false;
// Stop once we hit the global or target obj.
if (obj2->isGlobal() || obj2 == obj)
break;
obj2 = obj2->enclosingScope();
}
return obj == obj2;
}
bool
NameIC::update(JSContext *cx, size_t cacheIndex, HandleObject scopeChain,
MutableHandleValue vp)
{
AutoFlushCache afc ("GetNameCache");
IonScript *ion = GetTopIonJSScript(cx)->ionScript();
NameIC &cache = ion->getCache(cacheIndex).toName();
RootedPropertyName name(cx, cache.name());
RootedScript script(cx);
jsbytecode *pc;
cache.getScriptedLocation(&script, &pc);
RootedObject obj(cx);
RootedObject holder(cx);
RootedShape shape(cx);
if (!LookupName(cx, name, scopeChain, &obj, &holder, &shape))
return false;
if (cache.canAttachStub() &&
IsCacheableName(cx, scopeChain, obj, holder, shape, pc, cache.outputReg()))
{
if (!cache.attach(cx, ion, scopeChain, obj, shape))
return false;
}
if (cache.isTypeOf()) {
if (!FetchName<true>(cx, obj, holder, name, shape, vp))
return false;
} else {
if (!FetchName<false>(cx, obj, holder, name, shape, vp))
return false;
}
// Monitor changes to cache entry.
types::TypeScript::Monitor(cx, script, pc, vp);
return true;
}
bool
CallsiteCloneIC::attach(JSContext *cx, IonScript *ion, HandleFunction original,
HandleFunction clone)
{
MacroAssembler masm(cx);
// Guard against object identity on the original.
RepatchLabel exit;
CodeOffsetJump exitOffset = masm.branchPtrWithPatch(Assembler::NotEqual, calleeReg(),
ImmWord(uintptr_t(original.get())), &exit);
masm.bind(&exit);
// Load the clone.
masm.movePtr(ImmWord(uintptr_t(clone.get())), outputReg());
RepatchLabel rejoin;
CodeOffsetJump rejoinOffset = masm.jumpWithPatch(&rejoin);
masm.bind(&rejoin);
return linkAndAttachStub(cx, masm, ion, "generic", rejoinOffset, &exitOffset);
}
JSObject *
CallsiteCloneIC::update(JSContext *cx, size_t cacheIndex, HandleObject callee)
{
AutoFlushCache afc ("CallsiteCloneCache");
// Act as the identity for functions that are not clone-at-callsite, as we
// generate this cache as long as some callees are clone-at-callsite.
RootedFunction fun(cx, callee->toFunction());
if (!fun->nonLazyScript()->shouldCloneAtCallsite)
return fun;
IonScript *ion = GetTopIonJSScript(cx)->ionScript();
CallsiteCloneIC &cache = ion->getCache(cacheIndex).toCallsiteClone();
RootedFunction clone(cx, CloneFunctionAtCallsite(cx, fun, cache.callScript(), cache.callPc()));
if (!clone)
return NULL;
if (cache.canAttachStub()) {
if (!cache.attach(cx, ion, fun, clone))
return NULL;
}
return clone;
}
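// Design note (illustrative): functions marked shouldCloneAtCallsite receive
// a distinct clone per call site so that type information does not mix
// between sites; the IC memoizes the original->clone pair, letting later
// calls through this site skip CloneFunctionAtCallsite entirely.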