2012-01-20 14:08:20 +00:00
|
|
|
// Copyright 2012 the V8 project authors. All rights reserved.
|
2014-04-29 06:42:26 +00:00
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file.
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
#ifndef V8_UTILS_H_
|
|
|
|
#define V8_UTILS_H_
|
|
|
|
|
2014-01-14 09:57:05 +00:00
|
|
|
#include <limits.h>
|
2008-11-30 00:19:08 +00:00
|
|
|
#include <stdlib.h>
|
2010-03-12 10:20:01 +00:00
|
|
|
#include <string.h>
|
2008-11-30 00:19:08 +00:00
|
|
|
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/allocation.h"
|
2014-06-17 16:27:19 +00:00
|
|
|
#include "src/base/macros.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/checks.h"
|
|
|
|
#include "src/globals.h"
|
2014-06-03 16:12:48 +00:00
|
|
|
#include "src/list.h"
|
2014-06-03 08:12:43 +00:00
|
|
|
#include "src/platform.h"
|
|
|
|
#include "src/vector.h"
|
2010-11-05 08:18:53 +00:00
|
|
|
|
2009-05-25 10:05:56 +00:00
|
|
|
namespace v8 {
|
|
|
|
namespace internal {
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// General helper functions
|
|
|
|
|
2014-04-01 09:20:28 +00:00
|
|
|
// Returns true iff x is a power of 2. Cannot be used with the maximally
|
|
|
|
// negative value of the type T (the -1 overflows).
|
2008-07-03 15:10:15 +00:00
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
inline bool IsPowerOf2(T x) {
|
2010-06-07 09:36:30 +00:00
|
|
|
return IS_POWER_OF_TWO(x);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-06-14 11:20:36 +00:00
|
|
|
// X must be a power of 2. Returns the number of trailing zeros.
|
2011-11-29 10:56:11 +00:00
|
|
|
inline int WhichPowerOf2(uint32_t x) {
|
2010-06-14 11:20:36 +00:00
|
|
|
ASSERT(IsPowerOf2(x));
|
|
|
|
int bits = 0;
|
|
|
|
#ifdef DEBUG
|
|
|
|
int original_x = x;
|
|
|
|
#endif
|
|
|
|
if (x >= 0x10000) {
|
|
|
|
bits += 16;
|
|
|
|
x >>= 16;
|
|
|
|
}
|
|
|
|
if (x >= 0x100) {
|
|
|
|
bits += 8;
|
|
|
|
x >>= 8;
|
|
|
|
}
|
|
|
|
if (x >= 0x10) {
|
|
|
|
bits += 4;
|
|
|
|
x >>= 4;
|
|
|
|
}
|
|
|
|
switch (x) {
|
|
|
|
default: UNREACHABLE();
|
|
|
|
case 8: bits++; // Fall through.
|
|
|
|
case 4: bits++; // Fall through.
|
|
|
|
case 2: bits++; // Fall through.
|
|
|
|
case 1: break;
|
|
|
|
}
|
|
|
|
ASSERT_EQ(1 << bits, original_x);
|
|
|
|
return bits;
|
|
|
|
return 0;
|
|
|
|
}
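// Illustrative values (editorial sketch, not part of the original header):
// WhichPowerOf2 just counts the trailing zeros of its power-of-two input.
//   WhichPowerOf2(1)       == 0
//   WhichPowerOf2(8)       == 3
//   WhichPowerOf2(0x10000) == 16
// Passing a non-power-of-two (e.g. 6) trips the ASSERT in debug builds.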
|
|
|
|
|
|
|
|
|
2013-06-04 15:39:56 +00:00
|
|
|
inline int MostSignificantBit(uint32_t x) {
|
|
|
|
static const int msb4[] = {0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4};
|
|
|
|
int nibble = 0;
|
|
|
|
if (x & 0xffff0000) {
|
|
|
|
nibble += 16;
|
|
|
|
x >>= 16;
|
|
|
|
}
|
|
|
|
if (x & 0xff00) {
|
|
|
|
nibble += 8;
|
|
|
|
x >>= 8;
|
|
|
|
}
|
|
|
|
if (x & 0xf0) {
|
|
|
|
nibble += 4;
|
|
|
|
x >>= 4;
|
|
|
|
}
|
|
|
|
return nibble + msb4[x];
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-08-06 10:02:49 +00:00
|
|
|
// The C++ standard leaves the semantics of '>>' undefined for
|
|
|
|
// negative signed operands. Most implementations do the right thing,
|
|
|
|
// though.
|
2011-11-29 10:56:11 +00:00
|
|
|
inline int ArithmeticShiftRight(int x, int s) {
|
2008-07-03 15:10:15 +00:00
|
|
|
return x >> s;
|
|
|
|
}
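// Illustrative value (editorial note): on the usual two's-complement
// implementations this sign-extends, e.g. ArithmeticShiftRight(-8, 1) == -4.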
|
|
|
|
|
|
|
|
|
|
|
|
// Compute the 0-relative offset of some absolute value x of type T.
|
2008-08-06 10:02:49 +00:00
|
|
|
// This allows conversion of Addresses and integral types into
|
|
|
|
// 0-relative int offsets.
|
2008-07-03 15:10:15 +00:00
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
inline intptr_t OffsetFrom(T x) {
|
2008-07-03 15:10:15 +00:00
|
|
|
return x - static_cast<T>(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Compute the absolute value of type T for some 0-relative offset x.
|
2008-08-06 10:02:49 +00:00
|
|
|
// This allows conversion of 0-relative int offsets into Addresses and
|
|
|
|
// integral types.
|
2008-07-03 15:10:15 +00:00
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
inline T AddressFrom(intptr_t x) {
|
2009-11-11 09:50:06 +00:00
|
|
|
return static_cast<T>(static_cast<T>(0) + x);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Return the largest multiple of m which is <= x.
|
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
inline T RoundDown(T x, intptr_t m) {
|
2008-07-03 15:10:15 +00:00
|
|
|
ASSERT(IsPowerOf2(m));
|
|
|
|
return AddressFrom<T>(OffsetFrom(x) & -m);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Return the smallest multiple of m which is >= x.
|
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
inline T RoundUp(T x, intptr_t m) {
|
2011-09-19 18:36:47 +00:00
|
|
|
return RoundDown<T>(static_cast<T>(x + m - 1), m);
|
2008-07-03 15:10:15 +00:00
|
|
|
}
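// Usage sketch (editorial addition; values are illustrative). Both helpers
// require a power-of-two m and also work on Addresses via
// OffsetFrom/AddressFrom:
//   RoundDown(13, 8) == 8    // largest multiple of 8 that is <= 13
//   RoundUp(13, 8)   == 16   // smallest multiple of 8 that is >= 13
//   RoundUp(16, 8)   == 16   // already-aligned values are unchanged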
|
|
|
|
|
|
|
|
|
2014-02-12 09:19:30 +00:00
|
|
|
// Increment a pointer until it has the specified alignment.
|
|
|
|
// This works like RoundUp, but it works correctly on pointer types where
|
|
|
|
// sizeof(*pointer) might not be 1.
|
|
|
|
template<class T>
|
|
|
|
T AlignUp(T pointer, size_t alignment) {
|
|
|
|
ASSERT(sizeof(pointer) == sizeof(uintptr_t));
|
|
|
|
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
|
|
|
|
return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
|
|
|
|
}
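// Usage sketch (editorial addition; 'buffer' and the alignment are
// hypothetical). AlignUp is the pointer-typed counterpart of RoundUp:
//   char buffer[32];
//   char* aligned = AlignUp(&buffer[1], 8);  // next 8-byte boundary at or
//                                            // after &buffer[1]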
|
|
|
|
|
|
|
|
|
2008-11-25 11:07:48 +00:00
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
int Compare(const T& a, const T& b) {
|
2008-11-25 11:07:48 +00:00
|
|
|
if (a == b)
|
|
|
|
return 0;
|
|
|
|
else if (a < b)
|
|
|
|
return -1;
|
|
|
|
else
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
int PointerValueCompare(const T* a, const T* b) {
|
2008-11-26 08:03:55 +00:00
|
|
|
return Compare<T>(*a, *b);
|
2008-11-25 11:07:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-10-18 13:40:33 +00:00
|
|
|
// Compare function to compare the object pointer value of two
|
|
|
|
// handlified objects. The handles are passed as pointers to the
|
|
|
|
// handles.
|
|
|
|
template<typename T> class Handle; // Forward declaration.
|
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
|
2011-10-18 13:40:33 +00:00
|
|
|
return Compare<T*>(*(*a), *(*b));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-01-31 13:33:44 +00:00
|
|
|
// Returns the smallest power of two which is >= x. If you pass in a
|
|
|
|
// number that is already a power of two, it is returned as is.
|
|
|
|
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
|
|
|
|
// figure 3-3, page 48, where the function is called clp2.
|
|
|
|
inline uint32_t RoundUpToPowerOf2(uint32_t x) {
|
2010-11-05 08:18:53 +00:00
|
|
|
ASSERT(x <= 0x80000000u);
|
|
|
|
x = x - 1;
|
|
|
|
x = x | (x >> 1);
|
|
|
|
x = x | (x >> 2);
|
|
|
|
x = x | (x >> 4);
|
|
|
|
x = x | (x >> 8);
|
|
|
|
x = x | (x >> 16);
|
2012-01-31 13:33:44 +00:00
|
|
|
return x + 1;
|
2010-11-05 08:18:53 +00:00
|
|
|
}
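// Illustrative values (editorial sketch): the shift-and-or cascade above
// smears the highest set bit of x - 1 into all lower positions, so adding 1
// yields the next power of two:
//   RoundUpToPowerOf2(1)  == 1
//   RoundUpToPowerOf2(37) == 64
//   RoundUpToPowerOf2(64) == 64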
|
|
|
|
|
2008-08-06 10:02:49 +00:00
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
inline uint32_t RoundDownToPowerOf2(uint32_t x) {
|
2011-09-19 18:36:47 +00:00
|
|
|
uint32_t rounded_up = RoundUpToPowerOf2(x);
|
|
|
|
if (rounded_up > x) return rounded_up >> 1;
|
|
|
|
return rounded_up;
|
|
|
|
}
|
2008-08-06 10:02:49 +00:00
|
|
|
|
2011-09-19 18:36:47 +00:00
|
|
|
|
|
|
|
template <typename T, typename U>
|
2011-11-29 10:56:11 +00:00
|
|
|
inline bool IsAligned(T value, U alignment) {
|
2008-07-03 15:10:15 +00:00
|
|
|
return (value & (alignment - 1)) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Returns true if (addr + offset) is aligned.
|
2011-11-29 10:56:11 +00:00
|
|
|
inline bool IsAddressAligned(Address addr,
|
|
|
|
intptr_t alignment,
|
|
|
|
int offset = 0) {
|
2009-08-24 11:56:29 +00:00
|
|
|
intptr_t offs = OffsetFrom(addr + offset);
|
2008-07-03 15:10:15 +00:00
|
|
|
return IsAligned(offs, alignment);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Returns the maximum of the two parameters.
|
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
T Max(T a, T b) {
|
2008-07-03 15:10:15 +00:00
|
|
|
return a < b ? b : a;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Returns the minimum of the two parameters.
|
|
|
|
template <typename T>
|
2011-11-29 10:56:11 +00:00
|
|
|
T Min(T a, T b) {
|
2008-07-03 15:10:15 +00:00
|
|
|
return a < b ? a : b;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-10 12:05:54 +00:00
|
|
|
// Returns the absolute value of its argument.
|
|
|
|
template <typename T>
|
|
|
|
T Abs(T a) {
|
|
|
|
return a < 0 ? -a : a;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-05-29 12:42:04 +00:00
|
|
|
// Returns the negative absolute value of its argument.
|
|
|
|
template <typename T>
|
|
|
|
T NegAbs(T a) {
|
|
|
|
return a < 0 ? a : -a;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-03-07 10:36:28 +00:00
|
|
|
// TODO(svenpanne) Clean up the whole power-of-2 mess.
|
|
|
|
inline int32_t WhichPowerOf2Abs(int32_t x) {
|
|
|
|
return (x == kMinInt) ? 31 : WhichPowerOf2(Abs(x));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-06-12 11:20:31 +00:00
|
|
|
// Obtains the unsigned type corresponding to T
|
|
|
|
// available in C++11 as std::make_unsigned
|
|
|
|
template<typename T>
|
|
|
|
struct make_unsigned {
|
|
|
|
typedef T type;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
// Template specializations necessary to have make_unsigned work
|
|
|
|
template<> struct make_unsigned<int32_t> {
|
|
|
|
typedef uint32_t type;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
template<> struct make_unsigned<int64_t> {
|
|
|
|
typedef uint64_t type;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ----------------------------------------------------------------------------
|
2008-08-06 10:02:49 +00:00
|
|
|
// BitField is a helper template for encoding and decoding bit fields with
|
|
|
|
// unsigned content.
|
2013-05-15 12:29:13 +00:00
|
|
|
|
|
|
|
template<class T, int shift, int size, class U>
|
|
|
|
class BitFieldBase {
|
2008-07-03 15:10:15 +00:00
|
|
|
public:
|
2013-05-15 12:29:13 +00:00
|
|
|
// A type U mask of the bit field. To use all bits of a type U of x bits
|
|
|
|
// in a bitfield without compiler warnings we have to compute 2^x
|
|
|
|
// without using a shift count of x in the computation.
|
|
|
|
static const U kOne = static_cast<U>(1U);
|
|
|
|
static const U kMask = ((kOne << shift) << size) - (kOne << shift);
|
|
|
|
static const U kShift = shift;
|
|
|
|
static const U kSize = size;
|
2014-06-10 14:01:08 +00:00
|
|
|
static const U kNext = kShift + kSize;
|
2011-09-12 10:50:50 +00:00
|
|
|
|
|
|
|
// Value for the field with all bits set.
|
|
|
|
static const T kMax = static_cast<T>((1U << size) - 1);
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Tells whether the provided value fits into the bit field.
|
|
|
|
static bool is_valid(T value) {
|
2013-05-15 12:29:13 +00:00
|
|
|
return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
2013-05-15 12:29:13 +00:00
|
|
|
// Returns a type U with the bit field value encoded.
|
|
|
|
static U encode(T value) {
|
2008-07-03 15:10:15 +00:00
|
|
|
ASSERT(is_valid(value));
|
2013-05-15 12:29:13 +00:00
|
|
|
return static_cast<U>(value) << shift;
|
2008-07-03 15:10:15 +00:00
|
|
|
}
|
|
|
|
|
2013-05-15 12:29:13 +00:00
|
|
|
// Returns a type U with the bit field value updated.
|
|
|
|
static U update(U previous, T value) {
|
2011-09-12 10:50:50 +00:00
|
|
|
return (previous & ~kMask) | encode(value);
|
2011-05-24 14:01:36 +00:00
|
|
|
}
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// Extracts the bit field from the value.
|
2013-05-15 12:29:13 +00:00
|
|
|
static T decode(U value) {
|
2011-09-12 10:50:50 +00:00
|
|
|
return static_cast<T>((value & kMask) >> shift);
|
2010-12-07 11:31:57 +00:00
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2013-05-15 12:29:13 +00:00
|
|
|
template<class T, int shift, int size>
|
|
|
|
class BitField : public BitFieldBase<T, shift, size, uint32_t> { };
|
|
|
|
|
|
|
|
|
|
|
|
template<class T, int shift, int size>
|
|
|
|
class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
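// Usage sketch (editorial addition; 'IsTaggedField' and 'SizeField' are
// hypothetical field definitions, not part of the original header):
//   class IsTaggedField : public BitField<bool, 0, 1> {};
//   class SizeField     : public BitField<int, 1, 5> {};
//   uint32_t bits = IsTaggedField::encode(true) | SizeField::encode(17);
//   bool tagged   = IsTaggedField::decode(bits);  // true
//   int size      = SizeField::decode(bits);      // 17
//   bits          = SizeField::update(bits, 3);   // only bits 1..5 change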
|
|
|
|
|
|
|
|
|
2009-05-18 13:14:37 +00:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// Hash function.
|
|
|
|
|
2012-01-10 12:58:41 +00:00
|
|
|
static const uint32_t kZeroHashSeed = 0;
|
|
|
|
|
2010-11-05 08:18:53 +00:00
|
|
|
// Thomas Wang, Integer Hash Functions.
|
|
|
|
// http://www.concentric.net/~Ttwang/tech/inthash.htm
|
2012-01-10 12:58:41 +00:00
|
|
|
inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
|
2010-11-05 08:18:53 +00:00
|
|
|
uint32_t hash = key;
|
2012-01-10 12:58:41 +00:00
|
|
|
hash = hash ^ seed;
|
2010-11-05 08:18:53 +00:00
|
|
|
hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
|
|
|
|
hash = hash ^ (hash >> 12);
|
|
|
|
hash = hash + (hash << 2);
|
|
|
|
hash = hash ^ (hash >> 4);
|
|
|
|
hash = hash * 2057; // hash = (hash + (hash << 3)) + (hash << 11);
|
|
|
|
hash = hash ^ (hash >> 16);
|
|
|
|
return hash;
|
|
|
|
}
|
2008-07-03 15:10:15 +00:00
|
|
|
|
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
inline uint32_t ComputeLongHash(uint64_t key) {
|
2011-10-25 14:14:56 +00:00
|
|
|
uint64_t hash = key;
|
|
|
|
hash = ~hash + (hash << 18); // hash = (hash << 18) - hash - 1;
|
|
|
|
hash = hash ^ (hash >> 31);
|
|
|
|
hash = hash * 21; // hash = (hash + (hash << 2)) + (hash << 4);
|
|
|
|
hash = hash ^ (hash >> 11);
|
|
|
|
hash = hash + (hash << 6);
|
|
|
|
hash = hash ^ (hash >> 22);
|
2013-01-07 15:02:56 +00:00
|
|
|
return static_cast<uint32_t>(hash);
|
2011-10-25 14:14:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-11-29 10:56:11 +00:00
|
|
|
inline uint32_t ComputePointerHash(void* ptr) {
|
2011-04-29 16:06:25 +00:00
|
|
|
return ComputeIntegerHash(
|
2012-01-10 12:58:41 +00:00
|
|
|
static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
|
|
|
|
v8::internal::kZeroHashSeed);
|
2011-04-29 16:06:25 +00:00
|
|
|
}
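// Usage sketch (editorial addition; 'key', 'key64' and 'obj' are hypothetical
// variables of type uint32_t, uint64_t and void* respectively):
//   uint32_t h1 = ComputeIntegerHash(key, kZeroHashSeed);
//   uint32_t h2 = ComputeLongHash(key64);
//   uint32_t h3 = ComputePointerHash(obj);
// All three return well-mixed 32-bit values, typically masked with
// "hash & (capacity - 1)" when indexing a power-of-two sized table.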
|
|
|
|
|
|
|
|
|
2014-05-27 07:57:22 +00:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// Generated memcpy/memmove
|
|
|
|
|
|
|
|
// Initializes the codegen support that depends on CPU features. This is
|
|
|
|
// called after CPU initialization.
|
|
|
|
void init_memcopy_functions();
|
|
|
|
|
|
|
|
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
|
|
|
|
// Limit below which the extra overhead of the MemCopy function is likely
|
|
|
|
// to outweigh the benefits of faster copying.
|
|
|
|
const int kMinComplexMemCopy = 64;
|
|
|
|
|
|
|
|
// Copy memory area. No restrictions.
|
|
|
|
void MemMove(void* dest, const void* src, size_t size);
|
|
|
|
typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
|
|
|
|
|
|
|
|
// Keep the distinction of "move" vs. "copy" for the benefit of other
|
|
|
|
// architectures.
|
|
|
|
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
|
|
|
|
MemMove(dest, src, size);
|
|
|
|
}
|
|
|
|
#elif defined(V8_HOST_ARCH_ARM)
|
|
|
|
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
|
|
|
|
size_t size);
|
|
|
|
extern MemCopyUint8Function memcopy_uint8_function;
|
|
|
|
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
|
|
|
|
size_t chars) {
|
|
|
|
memcpy(dest, src, chars);
|
|
|
|
}
|
|
|
|
// For values < 16, the assembler function is slower than the inlined C code.
|
|
|
|
const int kMinComplexMemCopy = 16;
|
|
|
|
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
|
|
|
|
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
|
|
|
|
reinterpret_cast<const uint8_t*>(src), size);
|
|
|
|
}
|
|
|
|
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
|
|
|
|
memmove(dest, src, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
|
|
|
|
size_t size);
|
|
|
|
extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
|
|
|
|
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
|
|
|
|
size_t chars);
|
|
|
|
// For values < 12, the assembler function is slower than the inlined C code.
|
|
|
|
const int kMinComplexConvertMemCopy = 12;
|
|
|
|
V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
|
|
|
|
size_t size) {
|
|
|
|
(*memcopy_uint16_uint8_function)(dest, src, size);
|
|
|
|
}
|
|
|
|
#elif defined(V8_HOST_ARCH_MIPS)
|
|
|
|
typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
|
|
|
|
size_t size);
|
|
|
|
extern MemCopyUint8Function memcopy_uint8_function;
|
|
|
|
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
|
|
|
|
size_t chars) {
|
|
|
|
memcpy(dest, src, chars);
|
|
|
|
}
|
|
|
|
// For values < 16, the assembler function is slower than the inlined C code.
|
|
|
|
const int kMinComplexMemCopy = 16;
|
|
|
|
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
|
|
|
|
(*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
|
|
|
|
reinterpret_cast<const uint8_t*>(src), size);
|
|
|
|
}
|
|
|
|
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
|
|
|
|
memmove(dest, src, size);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
// Copy memory area to disjoint memory area.
|
|
|
|
V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
|
|
|
|
memcpy(dest, src, size);
|
|
|
|
}
|
|
|
|
V8_INLINE void MemMove(void* dest, const void* src, size_t size) {
|
|
|
|
memmove(dest, src, size);
|
|
|
|
}
|
|
|
|
const int kMinComplexMemCopy = 16 * kPointerSize;
|
|
|
|
#endif // V8_TARGET_ARCH_IA32
|
|
|
|
|
|
|
|
|
2008-07-03 15:10:15 +00:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// Miscellaneous
|
|
|
|
|
|
|
|
// A static resource holds a static instance that can be reserved in
|
|
|
|
// a local scope using an instance of Access. Attempts to re-reserve
|
|
|
|
// the instance will cause an error.
|
|
|
|
template <typename T>
|
|
|
|
class StaticResource {
|
|
|
|
public:
|
|
|
|
StaticResource() : is_reserved_(false) {}
|
|
|
|
|
|
|
|
private:
|
|
|
|
template <typename S> friend class Access;
|
|
|
|
T instance_;
|
|
|
|
bool is_reserved_;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
// Locally scoped access to a static resource.
|
|
|
|
template <typename T>
|
|
|
|
class Access {
|
|
|
|
public:
|
|
|
|
explicit Access(StaticResource<T>* resource)
|
|
|
|
: resource_(resource)
|
|
|
|
, instance_(&resource->instance_) {
|
|
|
|
ASSERT(!resource->is_reserved_);
|
|
|
|
resource->is_reserved_ = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
~Access() {
|
|
|
|
resource_->is_reserved_ = false;
|
|
|
|
resource_ = NULL;
|
|
|
|
instance_ = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
T* value() { return instance_; }
|
|
|
|
T* operator -> () { return instance_; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
StaticResource<T>* resource_;
|
|
|
|
T* instance_;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
// A pointer that can only be set once and doesn't allow NULL values.
|
|
|
|
template<typename T>
|
|
|
|
class SetOncePointer {
|
|
|
|
public:
|
|
|
|
SetOncePointer() : pointer_(NULL) { }
|
|
|
|
|
|
|
|
bool is_set() const { return pointer_ != NULL; }
|
|
|
|
|
|
|
|
T* get() const {
|
|
|
|
ASSERT(pointer_ != NULL);
|
|
|
|
return pointer_;
|
|
|
|
}
|
|
|
|
|
|
|
|
void set(T* value) {
|
|
|
|
ASSERT(pointer_ == NULL && value != NULL);
|
|
|
|
pointer_ = value;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
T* pointer_;
|
|
|
|
};
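// Usage sketch (editorial addition; 'Foo' and 'foo_' are hypothetical):
//   SetOncePointer<Foo> foo_;
//   if (!foo_.is_set()) foo_.set(new Foo());
//   Foo* foo = foo_.get();
// Setting twice, setting NULL, or calling get() before set() trips the
// ASSERTs in debug builds.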
|
|
|
|
|
|
|
|
|
2008-09-11 15:03:22 +00:00
|
|
|
template <typename T, int kSize>
|
2008-09-11 14:34:48 +00:00
|
|
|
class EmbeddedVector : public Vector<T> {
|
|
|
|
public:
|
2008-09-11 15:03:22 +00:00
|
|
|
EmbeddedVector() : Vector<T>(buffer_, kSize) { }
|
2009-05-29 09:00:39 +00:00
|
|
|
|
2010-12-07 11:31:57 +00:00
|
|
|
explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
|
|
|
|
for (int i = 0; i < kSize; ++i) {
|
|
|
|
buffer_[i] = initial_value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-05-29 09:00:39 +00:00
|
|
|
// When copying, make the underlying Vector reference our buffer.
|
|
|
|
EmbeddedVector(const EmbeddedVector& rhs)
|
|
|
|
: Vector<T>(rhs) {
|
2014-05-27 07:57:22 +00:00
|
|
|
MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
|
2009-05-29 09:00:39 +00:00
|
|
|
set_start(buffer_);
|
|
|
|
}
|
|
|
|
|
|
|
|
EmbeddedVector& operator=(const EmbeddedVector& rhs) {
|
|
|
|
if (this == &rhs) return *this;
|
|
|
|
Vector<T>::operator=(rhs);
|
2014-05-27 07:57:22 +00:00
|
|
|
MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
|
2010-03-12 10:20:01 +00:00
|
|
|
this->set_start(buffer_);
|
2009-05-29 09:00:39 +00:00
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2008-09-11 14:34:48 +00:00
|
|
|
private:
|
2008-09-11 15:03:22 +00:00
|
|
|
T buffer_[kSize];
|
2008-09-11 14:34:48 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-08-24 10:53:44 +00:00
|
|
|
/*
|
|
|
|
* A class that collects values into a backing store.
|
|
|
|
* Specialized versions of the class can allow access to the backing store
|
|
|
|
* in different ways.
|
|
|
|
* There is no guarantee that the backing store is contiguous (and, as a
|
|
|
|
* consequence, no guarantees that consecutively added elements are adjacent
|
|
|
|
* in memory). The collector may move elements unless it has guaranteed not
|
|
|
|
* to.
|
|
|
|
*/
|
2010-08-27 08:26:29 +00:00
|
|
|
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
|
2010-08-24 10:53:44 +00:00
|
|
|
class Collector {
|
|
|
|
public:
|
2010-08-27 08:26:29 +00:00
|
|
|
explicit Collector(int initial_capacity = kMinCapacity)
|
|
|
|
: index_(0), size_(0) {
|
|
|
|
current_chunk_ = Vector<T>::New(initial_capacity);
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
virtual ~Collector() {
|
|
|
|
// Free backing store (in reverse allocation order).
|
2010-08-27 08:26:29 +00:00
|
|
|
current_chunk_.Dispose();
|
2010-08-24 10:53:44 +00:00
|
|
|
for (int i = chunks_.length() - 1; i >= 0; i--) {
|
|
|
|
chunks_.at(i).Dispose();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a single element.
|
|
|
|
inline void Add(T value) {
|
2010-08-27 08:26:29 +00:00
|
|
|
if (index_ >= current_chunk_.length()) {
|
2010-08-24 10:53:44 +00:00
|
|
|
Grow(1);
|
|
|
|
}
|
|
|
|
current_chunk_[index_] = value;
|
|
|
|
index_++;
|
2010-08-27 08:26:29 +00:00
|
|
|
size_++;
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add a block of contiguous elements and return a Vector backed by the
|
|
|
|
// memory area.
|
|
|
|
// A basic Collector will keep this vector valid as long as the Collector
|
|
|
|
// is alive.
|
|
|
|
inline Vector<T> AddBlock(int size, T initial_value) {
|
2010-08-27 08:26:29 +00:00
|
|
|
ASSERT(size > 0);
|
|
|
|
if (size > current_chunk_.length() - index_) {
|
2010-08-24 10:53:44 +00:00
|
|
|
Grow(size);
|
|
|
|
}
|
2010-08-27 08:26:29 +00:00
|
|
|
T* position = current_chunk_.start() + index_;
|
2010-08-24 10:53:44 +00:00
|
|
|
index_ += size;
|
2010-08-27 08:26:29 +00:00
|
|
|
size_ += size;
|
2010-08-24 10:53:44 +00:00
|
|
|
for (int i = 0; i < size; i++) {
|
|
|
|
position[i] = initial_value;
|
|
|
|
}
|
|
|
|
return Vector<T>(position, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-12-22 20:14:19 +00:00
|
|
|
// Add a contiguous block of elements and return a vector backed
|
|
|
|
// by the added block.
|
|
|
|
// A basic Collector will keep this vector valid as long as the Collector
|
|
|
|
// is alive.
|
|
|
|
inline Vector<T> AddBlock(Vector<const T> source) {
|
|
|
|
if (source.length() > current_chunk_.length() - index_) {
|
|
|
|
Grow(source.length());
|
|
|
|
}
|
|
|
|
T* position = current_chunk_.start() + index_;
|
|
|
|
index_ += source.length();
|
|
|
|
size_ += source.length();
|
|
|
|
for (int i = 0; i < source.length(); i++) {
|
|
|
|
position[i] = source[i];
|
|
|
|
}
|
|
|
|
return Vector<T>(position, source.length());
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-08-27 08:26:29 +00:00
|
|
|
// Write the contents of the collector into the provided vector.
|
|
|
|
void WriteTo(Vector<T> destination) {
|
|
|
|
ASSERT(size_ <= destination.length());
|
2010-08-24 10:53:44 +00:00
|
|
|
int position = 0;
|
|
|
|
for (int i = 0; i < chunks_.length(); i++) {
|
|
|
|
Vector<T> chunk = chunks_.at(i);
|
|
|
|
for (int j = 0; j < chunk.length(); j++) {
|
2010-08-27 08:26:29 +00:00
|
|
|
destination[position] = chunk[j];
|
2010-08-24 10:53:44 +00:00
|
|
|
position++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int i = 0; i < index_; i++) {
|
2010-08-27 08:26:29 +00:00
|
|
|
destination[position] = current_chunk_[i];
|
2010-08-24 10:53:44 +00:00
|
|
|
position++;
|
|
|
|
}
|
2010-08-27 08:26:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Allocate a single contiguous vector, copy all the collected
|
|
|
|
// elements to the vector, and return it.
|
|
|
|
// The caller is responsible for freeing the memory of the returned
|
|
|
|
// vector (e.g., using Vector::Dispose).
|
|
|
|
Vector<T> ToVector() {
|
|
|
|
Vector<T> new_store = Vector<T>::New(size_);
|
|
|
|
WriteTo(new_store);
|
|
|
|
return new_store;
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
|
2010-08-24 12:29:50 +00:00
|
|
|
// Resets the collector to be empty.
|
2011-05-03 08:23:58 +00:00
|
|
|
virtual void Reset();
|
2010-08-24 12:29:50 +00:00
|
|
|
|
2010-08-27 08:26:29 +00:00
|
|
|
// Total number of elements added to collector so far.
|
|
|
|
inline int size() { return size_; }
|
|
|
|
|
2010-08-24 10:53:44 +00:00
|
|
|
protected:
|
|
|
|
static const int kMinCapacity = 16;
|
|
|
|
List<Vector<T> > chunks_;
|
2010-08-27 08:26:29 +00:00
|
|
|
Vector<T> current_chunk_; // Block of memory currently being written into.
|
|
|
|
int index_; // Current index in current chunk.
|
|
|
|
int size_; // Total number of elements in collector.
|
2010-08-24 10:53:44 +00:00
|
|
|
|
|
|
|
// Creates a new current chunk, and stores the old chunk in the chunks_ list.
|
|
|
|
void Grow(int min_capacity) {
|
2010-08-27 08:26:29 +00:00
|
|
|
ASSERT(growth_factor > 1);
|
2011-09-07 12:39:53 +00:00
|
|
|
int new_capacity;
|
|
|
|
int current_length = current_chunk_.length();
|
|
|
|
if (current_length < kMinCapacity) {
|
|
|
|
// The collector started out as empty.
|
|
|
|
new_capacity = min_capacity * growth_factor;
|
|
|
|
if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
|
|
|
|
} else {
|
|
|
|
int growth = current_length * (growth_factor - 1);
|
|
|
|
if (growth > max_growth) {
|
|
|
|
growth = max_growth;
|
|
|
|
}
|
|
|
|
new_capacity = current_length + growth;
|
|
|
|
if (new_capacity < min_capacity) {
|
|
|
|
new_capacity = min_capacity + growth;
|
|
|
|
}
|
2010-08-27 08:26:29 +00:00
|
|
|
}
|
2011-09-08 13:44:11 +00:00
|
|
|
NewChunk(new_capacity);
|
2010-08-27 08:26:29 +00:00
|
|
|
ASSERT(index_ + min_capacity <= current_chunk_.length());
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Before replacing the current chunk, give a subclass the option to move
|
|
|
|
// some of the current data into the new chunk. The function may update
|
|
|
|
// the current index_ value to represent data no longer in the current chunk.
|
|
|
|
// Afterwards, index_ refers to the first free slot in the new chunk.
|
2011-09-08 13:44:11 +00:00
|
|
|
virtual void NewChunk(int new_capacity) {
|
|
|
|
Vector<T> new_chunk = Vector<T>::New(new_capacity);
|
|
|
|
if (index_ > 0) {
|
|
|
|
chunks_.Add(current_chunk_.SubVector(0, index_));
|
|
|
|
} else {
|
|
|
|
current_chunk_.Dispose();
|
|
|
|
}
|
|
|
|
current_chunk_ = new_chunk;
|
|
|
|
index_ = 0;
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
};
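// Usage sketch (editorial addition; values are illustrative):
//   Collector<int> collector;
//   collector.Add(1);
//   Vector<int> block = collector.AddBlock(3, 0);  // three zero elements
//   block[0] = 2;                           // block stays valid while the
//                                           // collector is alive
//   Vector<int> all = collector.ToVector(); // contiguous copy of all 4
//   all.Dispose();                          // caller frees the copy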
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A collector that allows sequences of values to be guaranteed to
|
|
|
|
* stay consecutive.
|
|
|
|
* If the backing store grows while a sequence is active, the current
|
|
|
|
* sequence might be moved, but after the sequence is ended, it will
|
|
|
|
* not move again.
|
|
|
|
* NOTICE: Blocks allocated using Collector::AddBlock(int) can move
|
|
|
|
* as well, if inside an active sequence where another element is added.
|
|
|
|
*/
|
2010-08-27 08:26:29 +00:00
|
|
|
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
|
|
|
|
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
|
2010-08-24 10:53:44 +00:00
|
|
|
public:
|
2010-08-27 08:26:29 +00:00
|
|
|
explicit SequenceCollector(int initial_capacity)
|
|
|
|
: Collector<T, growth_factor, max_growth>(initial_capacity),
|
2010-08-24 10:53:44 +00:00
|
|
|
sequence_start_(kNoSequence) { }
|
|
|
|
|
|
|
|
virtual ~SequenceCollector() {}
|
|
|
|
|
|
|
|
void StartSequence() {
|
|
|
|
ASSERT(sequence_start_ == kNoSequence);
|
|
|
|
sequence_start_ = this->index_;
|
|
|
|
}
|
|
|
|
|
|
|
|
Vector<T> EndSequence() {
|
|
|
|
ASSERT(sequence_start_ != kNoSequence);
|
|
|
|
int sequence_start = sequence_start_;
|
|
|
|
sequence_start_ = kNoSequence;
|
2010-08-27 08:26:29 +00:00
|
|
|
if (sequence_start == this->index_) return Vector<T>();
|
|
|
|
return this->current_chunk_.SubVector(sequence_start, this->index_);
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
|
2010-08-24 12:29:50 +00:00
|
|
|
// Drops the currently added sequence, and all collected elements in it.
|
|
|
|
void DropSequence() {
|
|
|
|
ASSERT(sequence_start_ != kNoSequence);
|
2010-08-27 08:26:29 +00:00
|
|
|
int sequence_length = this->index_ - sequence_start_;
|
2010-08-24 12:29:50 +00:00
|
|
|
this->index_ = sequence_start_;
|
2010-08-27 08:26:29 +00:00
|
|
|
this->size_ -= sequence_length;
|
2010-08-24 12:29:50 +00:00
|
|
|
sequence_start_ = kNoSequence;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual void Reset() {
|
|
|
|
sequence_start_ = kNoSequence;
|
2010-08-27 08:26:29 +00:00
|
|
|
this->Collector<T, growth_factor, max_growth>::Reset();
|
2010-08-24 12:29:50 +00:00
|
|
|
}
|
|
|
|
|
2010-08-24 10:53:44 +00:00
|
|
|
private:
|
|
|
|
static const int kNoSequence = -1;
|
|
|
|
int sequence_start_;
|
|
|
|
|
|
|
|
// Move the currently active sequence to the new chunk.
|
2011-09-08 13:44:11 +00:00
|
|
|
virtual void NewChunk(int new_capacity) {
|
|
|
|
if (sequence_start_ == kNoSequence) {
|
|
|
|
// Fall back on default behavior if no sequence has been started.
|
|
|
|
this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
|
|
|
|
return;
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
2011-09-08 13:44:11 +00:00
|
|
|
int sequence_length = this->index_ - sequence_start_;
|
|
|
|
Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
|
|
|
|
ASSERT(sequence_length < new_chunk.length());
|
|
|
|
for (int i = 0; i < sequence_length; i++) {
|
|
|
|
new_chunk[i] = this->current_chunk_[sequence_start_ + i];
|
|
|
|
}
|
|
|
|
if (sequence_start_ > 0) {
|
|
|
|
this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
|
|
|
|
} else {
|
|
|
|
this->current_chunk_.Dispose();
|
|
|
|
}
|
|
|
|
this->current_chunk_ = new_chunk;
|
|
|
|
this->index_ = sequence_length;
|
|
|
|
sequence_start_ = 0;
|
2010-08-24 10:53:44 +00:00
|
|
|
}
|
|
|
|
};
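// Usage sketch (editorial addition): keeping a run of characters contiguous
// while it is being produced.
//   SequenceCollector<char> chars(64);
//   chars.StartSequence();
//   chars.Add('h');
//   chars.Add('i');
//   Vector<char> word = chars.EndSequence();  // contiguous; will not move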
|
|
|
|
|
|
|
|
|
2010-02-25 12:49:23 +00:00
|
|
|
// Compare ASCII/16bit chars to ASCII/16bit chars.
|
|
|
|
template <typename lchar, typename rchar>
|
2013-01-09 10:30:54 +00:00
|
|
|
inline int CompareCharsUnsigned(const lchar* lhs,
|
|
|
|
const rchar* rhs,
|
|
|
|
int chars) {
|
2010-02-25 12:49:23 +00:00
|
|
|
const lchar* limit = lhs + chars;
|
|
|
|
#ifdef V8_HOST_CAN_READ_UNALIGNED
|
|
|
|
if (sizeof(*lhs) == sizeof(*rhs)) {
|
|
|
|
// Number of characters in a uintptr_t.
|
|
|
|
static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
|
|
|
|
while (lhs <= limit - kStepSize) {
|
|
|
|
if (*reinterpret_cast<const uintptr_t*>(lhs) !=
|
|
|
|
*reinterpret_cast<const uintptr_t*>(rhs)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
lhs += kStepSize;
|
|
|
|
rhs += kStepSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
while (lhs < limit) {
|
|
|
|
int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
|
|
|
|
if (r != 0) return r;
|
|
|
|
++lhs;
|
|
|
|
++rhs;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-01-09 10:30:54 +00:00
|
|
|
template<typename lchar, typename rchar>
|
|
|
|
inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
|
|
|
|
ASSERT(sizeof(lchar) <= 2);
|
|
|
|
ASSERT(sizeof(rchar) <= 2);
|
|
|
|
if (sizeof(lchar) == 1) {
|
|
|
|
if (sizeof(rchar) == 1) {
|
|
|
|
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
|
|
|
|
reinterpret_cast<const uint8_t*>(rhs),
|
|
|
|
chars);
|
|
|
|
} else {
|
|
|
|
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
|
|
|
|
reinterpret_cast<const uint16_t*>(rhs),
|
|
|
|
chars);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (sizeof(rchar) == 1) {
|
|
|
|
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
|
|
|
|
reinterpret_cast<const uint8_t*>(rhs),
|
|
|
|
chars);
|
|
|
|
} else {
|
|
|
|
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
|
|
|
|
reinterpret_cast<const uint16_t*>(rhs),
|
|
|
|
chars);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
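// Usage sketch (editorial addition; the literals are illustrative):
//   const uint8_t one_byte[]  = { 'a', 'b', 'c' };
//   const uint16_t two_byte[] = { 'a', 'b', 'd' };
//   int r = CompareChars(one_byte, two_byte, 3);  // r < 0, since 'c' < 'd'
// The result has memcmp-style semantics: negative, zero, or positive.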
|
|
|
|
|
2010-02-25 12:49:23 +00:00
|
|
|
|
2009-11-24 14:10:06 +00:00
|
|
|
// Calculate 10^exponent.
|
2011-11-29 10:56:11 +00:00
|
|
|
inline int TenToThe(int exponent) {
|
2010-11-05 08:18:53 +00:00
|
|
|
ASSERT(exponent <= 9);
|
|
|
|
ASSERT(exponent >= 1);
|
|
|
|
int answer = 10;
|
|
|
|
for (int i = 1; i < exponent; i++) answer *= 10;
|
|
|
|
return answer;
|
|
|
|
}
|
2009-11-24 14:10:06 +00:00
|
|
|
|
2010-03-12 10:20:01 +00:00
|
|
|
|
|
|
|
// The type-based aliasing rule allows the compiler to assume that pointers of
|
|
|
|
// different types (for some definition of different) never alias each other.
|
|
|
|
// Thus the following code does not work:
|
|
|
|
//
|
|
|
|
// float f = foo();
|
|
|
|
// int fbits = *(int*)(&f);
|
|
|
|
//
|
|
|
|
// The compiler 'knows' that the int pointer can't refer to f since the types
|
|
|
|
// don't match, so the compiler may cache f in a register, leaving random data
|
|
|
|
// in fbits. Using C++ style casts makes no difference, however a pointer to
|
|
|
|
// char data is assumed to alias any other pointer. This is the 'memcpy
|
|
|
|
// exception'.
|
|
|
|
//
|
|
|
|
// Bit_cast uses the memcpy exception to move the bits from a variable of one
|
|
|
|
// type to a variable of another type. Of course the end result is likely to
|
|
|
|
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
|
|
|
|
// will completely optimize BitCast away.
|
|
|
|
//
|
|
|
|
// There is an additional use for BitCast.
|
|
|
|
// Recent gccs will warn when they see casts that may result in breakage due to
|
|
|
|
// the type-based aliasing rule. If you have checked that there is no breakage
|
|
|
|
// you can use BitCast to cast one pointer type to another. This confuses gcc
|
|
|
|
// enough that it can no longer see that you have cast one pointer type to
|
|
|
|
// another thus avoiding the warning.
|
2011-02-01 20:47:53 +00:00
|
|
|
|
|
|
|
// We need different implementations of BitCast for pointer and non-pointer
|
|
|
|
// values. We use partial specialization of an auxiliary struct to work around
|
|
|
|
// issues with template function overloading.
|
2010-03-12 10:20:01 +00:00
|
|
|
template <class Dest, class Source>
|
2011-02-01 20:47:53 +00:00
|
|
|
struct BitCastHelper {
|
|
|
|
STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
|
2010-03-12 10:20:01 +00:00
|
|
|
|
2011-02-01 20:47:53 +00:00
|
|
|
INLINE(static Dest cast(const Source& source)) {
|
|
|
|
Dest dest;
|
2014-04-25 12:23:11 +00:00
|
|
|
memcpy(&dest, &source, sizeof(dest));
|
2011-02-01 20:47:53 +00:00
|
|
|
return dest;
|
|
|
|
}
|
|
|
|
};
|
2010-03-12 10:20:01 +00:00
|
|
|
|
2010-08-11 10:52:34 +00:00
|
|
|
template <class Dest, class Source>
|
2011-02-01 20:47:53 +00:00
|
|
|
struct BitCastHelper<Dest, Source*> {
|
|
|
|
INLINE(static Dest cast(Source* source)) {
|
|
|
|
return BitCastHelper<Dest, uintptr_t>::
|
|
|
|
cast(reinterpret_cast<uintptr_t>(source));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2011-03-21 09:59:29 +00:00
|
|
|
template <class Dest, class Source>
|
|
|
|
INLINE(Dest BitCast(const Source& source));
|
|
|
|
|
2011-02-01 20:47:53 +00:00
|
|
|
template <class Dest, class Source>
|
|
|
|
inline Dest BitCast(const Source& source) {
|
|
|
|
return BitCastHelper<Dest, Source>::cast(source);
|
2010-08-11 10:52:34 +00:00
|
|
|
}
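// Usage sketch (editorial addition): reading the raw IEEE-754 bits of a
// double without violating the aliasing rule described above.
//   double d      = 1.0;
//   uint64_t bits = BitCast<uint64_t>(d);    // 0x3FF0000000000000
//   double back   = BitCast<double>(bits);   // 1.0 again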
|
2010-03-30 12:01:06 +00:00
|
|
|
|
2011-06-10 12:09:48 +00:00
|
|
|
|
|
|
|
template<typename ElementType, int NumElements>
|
|
|
|
class EmbeddedContainer {
|
|
|
|
public:
|
|
|
|
EmbeddedContainer() : elems_() { }
|
|
|
|
|
2012-10-11 10:52:58 +00:00
|
|
|
int length() const { return NumElements; }
|
|
|
|
const ElementType& operator[](int i) const {
|
|
|
|
ASSERT(i < length());
|
|
|
|
return elems_[i];
|
|
|
|
}
|
2011-06-10 12:09:48 +00:00
|
|
|
ElementType& operator[](int i) {
|
|
|
|
ASSERT(i < length());
|
|
|
|
return elems_[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
ElementType elems_[NumElements];
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
template<typename ElementType>
|
|
|
|
class EmbeddedContainer<ElementType, 0> {
|
|
|
|
public:
|
2012-10-11 10:52:58 +00:00
|
|
|
int length() const { return 0; }
|
|
|
|
const ElementType& operator[](int i) const {
|
|
|
|
UNREACHABLE();
|
|
|
|
static ElementType t = 0;
|
|
|
|
return t;
|
|
|
|
}
|
2011-06-10 12:09:48 +00:00
|
|
|
ElementType& operator[](int i) {
|
|
|
|
UNREACHABLE();
|
|
|
|
static ElementType t = 0;
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-07-05 11:54:11 +00:00
|
|
|
// Helper class for building result strings in a character buffer. The
|
|
|
|
// purpose of the class is to use safe operations that check the
|
|
|
|
// buffer bounds on all operations in debug mode.
|
|
|
|
// This simple base class does not allow formatted output.
|
|
|
|
class SimpleStringBuilder {
|
|
|
|
public:
|
|
|
|
// Create a string builder with a buffer of the given size. The
|
|
|
|
// buffer is allocated through NewArray<char> and must be
|
|
|
|
// deallocated by the caller of Finalize().
|
|
|
|
explicit SimpleStringBuilder(int size);
|
|
|
|
|
|
|
|
SimpleStringBuilder(char* buffer, int size)
|
|
|
|
: buffer_(buffer, size), position_(0) { }
|
|
|
|
|
|
|
|
~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }
|
|
|
|
|
|
|
|
int size() const { return buffer_.length(); }
|
|
|
|
|
|
|
|
// Get the current position in the builder.
|
|
|
|
int position() const {
|
|
|
|
ASSERT(!is_finalized());
|
|
|
|
return position_;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reset the position.
|
|
|
|
void Reset() { position_ = 0; }
|
|
|
|
|
|
|
|
// Add a single character to the builder. It is not allowed to add
|
|
|
|
// 0-characters; use the Finalize() method to terminate the string
|
|
|
|
// instead.
|
|
|
|
void AddCharacter(char c) {
|
|
|
|
ASSERT(c != '\0');
|
|
|
|
ASSERT(!is_finalized() && position_ < buffer_.length());
|
|
|
|
buffer_[position_++] = c;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add an entire string to the builder. Uses strlen() internally to
|
|
|
|
// compute the length of the input string.
|
|
|
|
void AddString(const char* s);
|
|
|
|
|
|
|
|
// Add the first 'n' characters of the given string 's' to the
|
|
|
|
// builder. The input string must have enough characters.
|
|
|
|
void AddSubstring(const char* s, int n);
|
|
|
|
|
|
|
|
// Add character padding to the builder. If count is non-positive,
|
|
|
|
// nothing is added to the builder.
|
|
|
|
void AddPadding(char c, int count);
|
|
|
|
|
|
|
|
// Add the decimal representation of the value.
|
|
|
|
void AddDecimalInteger(int value);
|
|
|
|
|
|
|
|
// Finalize the string by 0-terminating it and returning the buffer.
|
|
|
|
char* Finalize();
|
|
|
|
|
|
|
|
protected:
|
|
|
|
Vector<char> buffer_;
|
|
|
|
int position_;
|
|
|
|
|
|
|
|
bool is_finalized() const { return position_ < 0; }
|
2011-09-08 19:57:14 +00:00
|
|
|
|
2011-07-05 11:54:11 +00:00
|
|
|
private:
|
|
|
|
DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
|
|
|
|
};
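// Usage sketch (editorial addition; the buffer size is arbitrary):
//   SimpleStringBuilder builder(32);
//   builder.AddString("answer=");
//   builder.AddDecimalInteger(42);
//   char* result = builder.Finalize();  // "answer=42", 0-terminated
//   DeleteArray(result);                // caller owns the buffer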
|
|
|
|
|
2011-07-21 13:06:55 +00:00
|
|
|
|
|
|
|
// A poor man's version of STL's bitset: A bit set of enums E (without explicit
|
|
|
|
// values), fitting into an integral type T.
|
|
|
|
template <class E, class T = int>
|
|
|
|
class EnumSet {
|
|
|
|
public:
|
|
|
|
explicit EnumSet(T bits = 0) : bits_(bits) {}
|
|
|
|
bool IsEmpty() const { return bits_ == 0; }
|
|
|
|
bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
|
2012-01-20 14:08:20 +00:00
|
|
|
bool ContainsAnyOf(const EnumSet& set) const {
|
|
|
|
return (bits_ & set.bits_) != 0;
|
|
|
|
}
|
2011-07-21 13:06:55 +00:00
|
|
|
void Add(E element) { bits_ |= Mask(element); }
|
2012-01-20 14:08:20 +00:00
|
|
|
void Add(const EnumSet& set) { bits_ |= set.bits_; }
|
2011-07-21 13:06:55 +00:00
|
|
|
void Remove(E element) { bits_ &= ~Mask(element); }
|
2012-01-20 14:08:20 +00:00
|
|
|
void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
|
|
|
|
void RemoveAll() { bits_ = 0; }
|
|
|
|
void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
|
2011-07-21 13:06:55 +00:00
|
|
|
T ToIntegral() const { return bits_; }
|
2012-01-20 14:08:20 +00:00
|
|
|
bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
|
2013-05-16 10:59:17 +00:00
|
|
|
bool operator!=(const EnumSet& set) { return bits_ != set.bits_; }
|
2013-04-10 08:29:39 +00:00
|
|
|
EnumSet<E, T> operator|(const EnumSet& set) const {
|
|
|
|
return EnumSet<E, T>(bits_ | set.bits_);
|
|
|
|
}
|
2011-07-21 13:06:55 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
T Mask(E element) const {
|
|
|
|
// The strange typing in ASSERT is necessary to avoid stupid warnings, see:
|
|
|
|
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
|
2012-09-26 14:42:08 +00:00
|
|
|
ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
|
2013-11-06 13:20:14 +00:00
|
|
|
return static_cast<T>(1) << element;
|
2011-07-21 13:06:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
T bits_;
|
|
|
|
};
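// Usage sketch (editorial addition; 'Color' and its values are hypothetical):
//   enum Color { kRed, kGreen, kBlue };
//   EnumSet<Color> colors;
//   colors.Add(kRed);
//   bool has_green = colors.Contains(kGreen);  // false
//   int raw        = colors.ToIntegral();      // 1 << kRed == 1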
|
|
|
|
|
2014-02-12 09:19:30 +00:00
|
|
|
// Bit field extraction.
|
|
|
|
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
|
|
|
|
return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
|
|
|
|
return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
|
|
|
|
return (x << (31 - msb)) >> (lsb + 31 - msb);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline int signed_bitextract_64(int msb, int lsb, int x) {
|
|
|
|
// TODO(jbramley): This is broken for big bitfields.
|
|
|
|
return (x << (63 - msb)) >> (lsb + 63 - msb);
|
|
|
|
}
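// Illustrative values (editorial sketch): msb and lsb are inclusive bit
// indices, so the field width is msb - lsb + 1.
//   unsigned_bitextract_32(7, 4, 0xAB) == 0xA   // bits 7..4 of 0xAB
//   unsigned_bitextract_32(3, 0, 0xAB) == 0xB   // bits 3..0 of 0xAB
//   signed_bitextract_32(7, 4, 0xAB)   == -6    // same field, sign-extended
//                                               // (on the usual arithmetic-
//                                               // shift implementations)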
|
|
|
|
|
|
|
|
// Check number width.
|
|
|
|
inline bool is_intn(int64_t x, unsigned n) {
|
|
|
|
ASSERT((0 < n) && (n < 64));
|
|
|
|
int64_t limit = static_cast<int64_t>(1) << (n - 1);
|
|
|
|
return (-limit <= x) && (x < limit);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline bool is_uintn(int64_t x, unsigned n) {
|
|
|
|
ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
|
|
|
|
return !(x >> n);
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class T>
|
|
|
|
inline T truncate_to_intn(T x, unsigned n) {
|
|
|
|
ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
|
|
|
|
return (x & ((static_cast<T>(1) << n) - 1));
|
|
|
|
}
|
|
|
|
|
|
|
|
#define INT_1_TO_63_LIST(V) \
|
|
|
|
V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
|
|
|
|
V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
|
|
|
|
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
|
|
|
|
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
|
|
|
|
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
|
|
|
|
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
|
|
|
|
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
|
|
|
|
V(57) V(58) V(59) V(60) V(61) V(62) V(63)
|
|
|
|
|
|
|
|
#define DECLARE_IS_INT_N(N) \
|
|
|
|
inline bool is_int##N(int64_t x) { return is_intn(x, N); }
|
|
|
|
#define DECLARE_IS_UINT_N(N) \
|
|
|
|
template <class T> \
|
|
|
|
inline bool is_uint##N(T x) { return is_uintn(x, N); }
|
|
|
|
#define DECLARE_TRUNCATE_TO_INT_N(N) \
|
|
|
|
template <class T> \
|
|
|
|
inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
|
|
|
|
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
|
|
|
|
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
|
|
|
|
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
|
|
|
|
#undef DECLARE_IS_INT_N
|
|
|
|
#undef DECLARE_IS_UINT_N
|
|
|
|
#undef DECLARE_TRUNCATE_TO_INT_N
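// The macros above expand to one helper per width from 1 to 63 (editorial
// note), e.g.:
//   is_int8(127)           == true    // fits in a signed 8-bit field
//   is_int8(128)           == false
//   is_uint4(15)           == true
//   truncate_to_int5(0x7F) == 0x1F    // keeps only the low 5 bits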
|
2012-08-06 14:13:09 +00:00
|
|
|
|
|
|
|
class TypeFeedbackId {
|
|
|
|
public:
|
|
|
|
explicit TypeFeedbackId(int id) : id_(id) { }
|
|
|
|
int ToInt() const { return id_; }
|
|
|
|
|
|
|
|
static TypeFeedbackId None() { return TypeFeedbackId(kNoneId); }
|
|
|
|
bool IsNone() const { return id_ == kNoneId; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
static const int kNoneId = -1;
|
|
|
|
|
|
|
|
int id_;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
class BailoutId {
|
|
|
|
public:
|
|
|
|
explicit BailoutId(int id) : id_(id) { }
|
|
|
|
int ToInt() const { return id_; }
|
|
|
|
|
|
|
|
static BailoutId None() { return BailoutId(kNoneId); }
|
|
|
|
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
|
|
|
|
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
|
|
|
|
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
|
2012-12-18 16:25:45 +00:00
|
|
|
static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
|
2012-08-06 14:13:09 +00:00
|
|
|
|
|
|
|
bool IsNone() const { return id_ == kNoneId; }
|
|
|
|
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
|
2013-12-18 10:38:58 +00:00
|
|
|
bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
|
2012-08-06 14:13:09 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
static const int kNoneId = -1;
|
|
|
|
|
|
|
|
// Using 0 could disguise errors.
|
|
|
|
static const int kFunctionEntryId = 2;
|
|
|
|
|
|
|
|
// This AST id identifies the point after the declarations have been visited.
|
|
|
|
// We need it to capture the environment effects of declarations that emit
|
|
|
|
// code (function declarations).
|
|
|
|
static const int kDeclarationsId = 3;
|
|
|
|
|
2012-12-18 16:25:45 +00:00
|
|
|
// Every FunctionState starts with this id.
|
2012-08-06 14:13:09 +00:00
|
|
|
static const int kFirstUsableId = 4;
|
|
|
|
|
2012-12-18 16:25:45 +00:00
|
|
|
// Every compiled stub starts with this id.
|
|
|
|
static const int kStubEntryId = 5;
|
|
|
|
|
2012-08-06 14:13:09 +00:00
|
|
|
int id_;
|
|
|
|
};
|
|
|
|
|
2014-02-07 11:55:11 +00:00
|
|
|
|
|
|
|
template <class C>
|
|
|
|
class ContainerPointerWrapper {
|
|
|
|
public:
|
|
|
|
typedef typename C::iterator iterator;
|
|
|
|
typedef typename C::reverse_iterator reverse_iterator;
|
|
|
|
explicit ContainerPointerWrapper(C* container) : container_(container) {}
|
|
|
|
iterator begin() { return container_->begin(); }
|
|
|
|
iterator end() { return container_->end(); }
|
|
|
|
reverse_iterator rbegin() { return container_->rbegin(); }
|
|
|
|
reverse_iterator rend() { return container_->rend(); }
|
|
|
|
private:
|
|
|
|
C* container_;
|
|
|
|
};
|
|
|
|
|
2014-04-25 13:47:01 +00:00
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// I/O support.
|
|
|
|
|
|
|
|
#if __GNUC__ >= 4
|
|
|
|
// On gcc we can ask the compiler to check the types of %d-style format
|
|
|
|
// specifiers and their associated arguments. TODO(erikcorry) fix this
|
|
|
|
// so it works on MacOSX.
|
|
|
|
#if defined(__MACH__) && defined(__APPLE__)
|
|
|
|
#define PRINTF_CHECKING
|
|
|
|
#define FPRINTF_CHECKING
|
|
|
|
#define PRINTF_METHOD_CHECKING
|
|
|
|
#define FPRINTF_METHOD_CHECKING
|
|
|
|
#else // MacOsX.
|
|
|
|
#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
|
|
|
|
#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
|
|
|
|
#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
|
|
|
|
#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
|
|
|
|
#endif
|
|
|
|
#else
|
|
|
|
#define PRINTF_CHECKING
|
|
|
|
#define FPRINTF_CHECKING
|
|
|
|
#define PRINTF_METHOD_CHECKING
|
|
|
|
#define FPRINTF_METHOD_CHECKING
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Our version of printf().
|
|
|
|
void PRINTF_CHECKING PrintF(const char* format, ...);
|
|
|
|
void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
|
|
|
|
|
|
|
|
// Prepends the current process ID to the output.
|
|
|
|
void PRINTF_CHECKING PrintPID(const char* format, ...);
|
|
|
|
|
2014-06-13 16:43:27 +00:00
|
|
|
// Safe formatting print. Ensures that str is always null-terminated.
|
|
|
|
// Returns the number of chars written, or -1 if output was truncated.
|
|
|
|
int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...);
|
|
|
|
int VSNPrintF(Vector<char> str, const char* format, va_list args);
|
|
|
|
|
|
|
|
void StrNCpy(Vector<char> dest, const char* src, size_t n);
|
|
|
|
|
2014-04-25 13:47:01 +00:00
|
|
|
// Our version of fflush.
|
|
|
|
void Flush(FILE* out);
|
|
|
|
|
|
|
|
inline void Flush() {
|
|
|
|
Flush(stdout);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Read a line of characters after printing the prompt to stdout. The resulting
|
|
|
|
// char* needs to be disposed of with DeleteArray by the caller.
|
|
|
|
char* ReadLine(const char* prompt);
|
|
|
|
|
|
|
|
|
|
|
|
// Read and return the raw bytes in a file. The size of the buffer is returned
|
|
|
|
// in size.
|
|
|
|
// The returned buffer must be freed by the caller.
|
|
|
|
byte* ReadBytes(const char* filename, int* size, bool verbose = true);
|
|
|
|
|
|
|
|
|
|
|
|
// Append size chars from str to the file given by filename.
|
|
|
|
// The data is appended to the file. Returns the number of chars written.
|
|
|
|
int AppendChars(const char* filename,
|
|
|
|
const char* str,
|
|
|
|
int size,
|
|
|
|
bool verbose = true);
|
|
|
|
|
|
|
|
|
|
|
|
// Write size chars from str to the file given by filename.
|
|
|
|
// The file is overwritten. Returns the number of chars written.
|
|
|
|
int WriteChars(const char* filename,
|
|
|
|
const char* str,
|
|
|
|
int size,
|
|
|
|
bool verbose = true);
|
|
|
|
|
|
|
|
|
|
|
|
// Write size bytes to the file given by filename.
|
|
|
|
// The file is overwritten. Returns the number of bytes written.
|
|
|
|
int WriteBytes(const char* filename,
|
|
|
|
const byte* bytes,
|
|
|
|
int size,
|
|
|
|
bool verbose = true);
|
|
|
|
|
|
|
|
|
|
|
|
// Write the C code
|
|
|
|
// const char* <varname> = "<str>";
|
|
|
|
// const int <varname>_len = <len>;
|
|
|
|
// to the file given by filename. Only the first size chars are written.
|
|
|
|
int WriteAsCFile(const char* filename, const char* varname,
|
|
|
|
const char* str, int size, bool verbose = true);
|
|
|
|
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// Data structures
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
|
|
|
|
int length) {
|
|
|
|
return Vector< Handle<Object> >(
|
|
|
|
reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
// Memory
|
|
|
|
|
|
|
|
// Copies words from |src| to |dst|. The data spans must not overlap.
|
|
|
|
template <typename T>
|
|
|
|
inline void CopyWords(T* dst, const T* src, size_t num_words) {
|
|
|
|
STATIC_ASSERT(sizeof(T) == kPointerSize);
|
2014-05-07 15:28:30 +00:00
|
|
|
// TODO(mvstanton): disabled because mac builds are bogus failing on this
|
|
|
|
// assert. They are doing a signed comparison. Investigate in
|
|
|
|
// the morning.
|
|
|
|
// ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
|
|
|
|
// Max(dst, const_cast<T*>(src)));
|
2014-04-25 13:47:01 +00:00
|
|
|
ASSERT(num_words > 0);
|
|
|
|
|
2014-05-27 07:57:22 +00:00
|
|
|
// Use block copying MemCopy if the segment we're copying is
|
2014-04-25 13:47:01 +00:00
|
|
|
// big enough to justify the extra call/setup overhead.
|
|
|
|
static const size_t kBlockCopyLimit = 16;
|
|
|
|
|
|
|
|
if (num_words < kBlockCopyLimit) {
|
|
|
|
do {
|
|
|
|
num_words--;
|
|
|
|
*dst++ = *src++;
|
|
|
|
} while (num_words > 0);
|
|
|
|
} else {
|
2014-05-27 07:57:22 +00:00
|
|
|
MemCopy(dst, src, num_words * kPointerSize);
|
2014-04-25 13:47:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Copies words from |src| to |dst|. No restrictions.
|
|
|
|
template <typename T>
|
|
|
|
inline void MoveWords(T* dst, const T* src, size_t num_words) {
|
|
|
|
STATIC_ASSERT(sizeof(T) == kPointerSize);
|
|
|
|
ASSERT(num_words > 0);
|
|
|
|
|
2014-05-27 07:57:22 +00:00
|
|
|
// Use block copying MemCopy if the segment we're copying is
|
2014-04-25 13:47:01 +00:00
|
|
|
// big enough to justify the extra call/setup overhead.
|
|
|
|
static const size_t kBlockCopyLimit = 16;
|
|
|
|
|
|
|
|
if (num_words < kBlockCopyLimit &&
|
|
|
|
((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
|
|
|
|
T* end = dst + num_words;
|
|
|
|
do {
|
|
|
|
num_words--;
|
|
|
|
*dst++ = *src++;
|
|
|
|
} while (num_words > 0);
|
|
|
|
} else {
|
2014-05-27 07:57:22 +00:00
|
|
|
MemMove(dst, src, num_words * kPointerSize);
|
2014-04-25 13:47:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Copies data from |src| to |dst|. The data spans must not overlap.
|
|
|
|
template <typename T>
|
|
|
|
inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
|
|
|
|
STATIC_ASSERT(sizeof(T) == 1);
|
|
|
|
ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
|
|
|
|
Max(dst, const_cast<T*>(src)));
|
|
|
|
if (num_bytes == 0) return;
|
|
|
|
|
2014-05-27 07:57:22 +00:00
|
|
|
// Use block copying MemCopy if the segment we're copying is
|
2014-04-25 13:47:01 +00:00
|
|
|
// big enough to justify the extra call/setup overhead.
|
2014-05-27 07:57:22 +00:00
|
|
|
static const int kBlockCopyLimit = kMinComplexMemCopy;
|
2014-04-25 13:47:01 +00:00
|
|
|
|
|
|
|
if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
|
|
|
|
do {
|
|
|
|
num_bytes--;
|
|
|
|
*dst++ = *src++;
|
|
|
|
} while (num_bytes > 0);
|
|
|
|
} else {
|
2014-05-27 07:57:22 +00:00
|
|
|
MemCopy(dst, src, num_bytes);
|
2014-04-25 13:47:01 +00:00
|
|
|
}
|
|
|
|
}


template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, int counter) {
#ifdef DEBUG
  T* a = NULL;
  U* b = NULL;
  a = b;  // Fake assignment to check assignability.
  USE(a);
#endif  // DEBUG
#if V8_HOST_ARCH_IA32
#define STOS "stosl"
#elif V8_HOST_ARCH_X64
#define STOS "stosq"
#endif
#if defined(__native_client__)
  // This STOS sequence does not validate for x86_64 Native Client.
  // Here we #undef STOS to force use of the slower C version.
  // TODO(bradchen): Profile V8 and implement a faster REP STOS
  // here if the profile indicates it matters.
#undef STOS
#endif
#if defined(MEMORY_SANITIZER)
  // MemorySanitizer does not understand inline assembly.
#undef STOS
#endif

#if defined(__GNUC__) && defined(STOS)
  asm volatile(
      "cld;"
      "rep ; " STOS
      : "+&c" (counter), "+&D" (dest)
      : "a" (value)
      : "memory", "cc");
#else
  for (int i = 0; i < counter; i++) {
    dest[i] = value;
  }
#endif

#undef STOS
}
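
// Example usage (illustrative sketch, not part of the original header):
// MemsetPointer fills an array of pointers with a single value; the fake
// assignment above only requires that the value's type be assignable to the
// element type. The names below are hypothetical.
//
//   Object* slots[128];
//   Object* sentinel = NULL;  // Stand-in value; any assignable pointer works.
//   MemsetPointer(slots, sentinel, 128);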


// Simple wrapper that allows an ExternalString to refer to a
// Vector<const char>. Doesn't assume ownership of the data.
class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
 public:
  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}

  virtual const char* data() const { return data_.start(); }

  virtual size_t length() const { return data_.length(); }

 private:
  Vector<const char> data_;
};
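
// Example usage (illustrative sketch, not part of the original header):
// the adapter lets externally held character data back a string without
// copying; it does not own the bytes, so the underlying storage must outlive
// any string that refers to it. CStrVector is assumed to be the C-string
// helper available in this codebase.
//
//   Vector<const char> source = CStrVector("external data");
//   AsciiStringAdapter adapter(source);
//   // adapter.data() / adapter.length() now expose the vector's contents.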


// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exists tells whether the file existed.
Vector<const char> ReadFile(const char* filename,
                            bool* exists,
                            bool verbose = true);
Vector<const char> ReadFile(FILE* file,
                            bool* exists,
                            bool verbose = true);
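
// Example usage (illustrative sketch, not part of the original header):
// the caller owns the returned buffer and is assumed to release it with
// Vector::Dispose() once done. The file name below is hypothetical.
//
//   bool exists;
//   Vector<const char> contents = ReadFile("startup.js", &exists, false);
//   if (exists) {
//     // contents.start() is the 0-terminated file contents.
//   }
//   contents.Dispose();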


template <typename sourcechar, typename sinkchar>
INLINE(static void CopyCharsUnsigned(sinkchar* dest,
                                     const sourcechar* src,
                                     int chars));
#if defined(V8_HOST_ARCH_ARM)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#elif defined(V8_HOST_ARCH_MIPS)
INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
#endif

// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));

template<typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
  ASSERT(sizeof(sourcechar) <= 2);
  ASSERT(sizeof(sinkchar) <= 2);
  if (sizeof(sinkchar) == 1) {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  } else {
    if (sizeof(sourcechar) == 1) {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint8_t*>(src),
                        chars);
    } else {
      CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
                        reinterpret_cast<const uint16_t*>(src),
                        chars);
    }
  }
}
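
// Example usage (illustrative sketch, not part of the original header):
// CopyChars picks the right CopyCharsUnsigned overload from the character
// widths, so widening a one-byte buffer into a two-byte buffer is one call.
// The buffers below are hypothetical.
//
//   const char narrow[] = "abc";
//   uint16_t wide[3];
//   CopyChars(wide, narrow, 3);  // 1-byte source, 2-byte sink.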


template <typename sourcechar, typename sinkchar>
void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
  sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  if (sizeof(*dest) == sizeof(*src)) {
    if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
      MemCopy(dest, src, chars * sizeof(*dest));
      return;
    }
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest);  // NOLINT
    ASSERT(dest + kStepSize > dest);  // Check for overflow.
    while (dest + kStepSize <= limit) {
      *reinterpret_cast<uintptr_t*>(dest) =
          *reinterpret_cast<const uintptr_t*>(src);
      dest += kStepSize;
      src += kStepSize;
    }
  }
#endif
  while (dest < limit) {
    *dest++ = static_cast<sinkchar>(*src++);
  }
}


#if defined(V8_HOST_ARCH_ARM)
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 2);
      break;
    case 3:
      memcpy(dest, src, 3);
      break;
    case 4:
      memcpy(dest, src, 4);
      break;
    case 5:
      memcpy(dest, src, 5);
      break;
    case 6:
      memcpy(dest, src, 6);
      break;
    case 7:
      memcpy(dest, src, 7);
      break;
    case 8:
      memcpy(dest, src, 8);
      break;
    case 9:
      memcpy(dest, src, 9);
      break;
    case 10:
      memcpy(dest, src, 10);
      break;
    case 11:
      memcpy(dest, src, 11);
      break;
    case 12:
      memcpy(dest, src, 12);
      break;
    case 13:
      memcpy(dest, src, 13);
      break;
    case 14:
      memcpy(dest, src, 14);
      break;
    case 15:
      memcpy(dest, src, 15);
      break;
    default:
      MemCopy(dest, src, chars);
      break;
  }
}


void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars) {
  if (chars >= kMinComplexConvertMemCopy) {
    MemCopyUint16Uint8(dest, src, chars);
  } else {
    MemCopyUint16Uint8Wrapper(dest, src, chars);
  }
}


void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
  switch (static_cast<unsigned>(chars)) {
    case 0:
      break;
    case 1:
      *dest = *src;
      break;
    case 2:
      memcpy(dest, src, 4);
      break;
    case 3:
      memcpy(dest, src, 6);
      break;
    case 4:
      memcpy(dest, src, 8);
      break;
    case 5:
      memcpy(dest, src, 10);
      break;
    case 6:
      memcpy(dest, src, 12);
      break;
    case 7:
      memcpy(dest, src, 14);
      break;
    default:
      MemCopy(dest, src, chars * sizeof(*dest));
      break;
  }
}


#elif defined(V8_HOST_ARCH_MIPS)
void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars);
  } else {
    MemCopy(dest, src, chars);
  }
}


void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
  if (chars < kMinComplexMemCopy) {
    memcpy(dest, src, chars * sizeof(*dest));
  } else {
    MemCopy(dest, src, chars * sizeof(*dest));
  }
}
#endif


class StringBuilder : public SimpleStringBuilder {
 public:
  explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
  StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }

  // Add formatted contents to the builder just like printf().
  void AddFormatted(const char* format, ...);

  // Add formatted contents like printf based on a va_list.
  void AddFormattedList(const char* format, va_list list);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};
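
// Example usage (illustrative sketch, not part of the original header):
// StringBuilder layers printf-style formatting on top of
// SimpleStringBuilder; Finalize() is assumed from the base class and returns
// the 0-terminated result.
//
//   StringBuilder builder(64);
//   builder.AddFormatted("%d bytes in %d chunks", 1024, 4);
//   char* message = builder.Finalize();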


} }  // namespace v8::internal

#endif  // V8_UTILS_H_