/*
*******************************************************************************
*
*   Copyright (C) 1999-2001, International Business Machines
*   Corporation and others. All Rights Reserved.
*
*******************************************************************************
*
*   file name:  utf.h
*   encoding:   US-ASCII
*   tab size:   8 (not used)
*   indentation:4
*
*   created on: 1999sep09
*   created by: Markus W. Scherer
*/

/**
 * \file
 * \brief C API: UChar and UChar32 data types and UTF macros for C Unicode string handling
 *
 * This file defines the UChar and UChar32 data types for Unicode code units
 * and code points, as well as macros for efficiently getting code points
 * in and out of a string.
 *
 * utf.h is included by utypes.h and itself includes the utfXX.h after some
 * common definitions. Those files define the macros for each UTF size.
 *
 * The original concept for these files was to allow ICU, in principle, to
 * select which UTF (UTF-8/16/32) is used internally by defining UTF_SIZE to
 * either 8, 16, or 32. utf.h would then define the UChar type accordingly.
 * UTF-16 was the default.
 *
 * This concept has been abandoned.
 * A lot of the ICU source code, especially low-level code like
 * conversion, normalization, and collation, assumes UTF-16,
 * and utf.h enforces that default.
 * The UTF-8 and UTF-32 macros remain for now for completeness and backward compatibility.
 *
 * Accordingly, utf.h defines UChar to be an unsigned 16-bit integer. If this matches wchar_t, then
 * UChar is defined to be exactly wchar_t, otherwise uint16_t.
 *
 * UChar32 is always defined to be a 32-bit integer to be large enough for a 21-bit
 * Unicode code point (Unicode scalar value, 0..0x10ffff). If wchar_t is a 32-bit type, then
 * UChar32 is defined to be exactly wchar_t, regardless of whether wchar_t is signed or unsigned.
 * This means that UChar32 may be signed or unsigned depending on the platform!
 * If wchar_t is not a 32-bit type, then UChar32 is defined to be uint32_t.
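 *
 * As a rough sketch, the type selection amounts to the following (illustrative
 * only; the real preprocessor logic performs additional platform checks, and
 * the use of a size macro like U_SIZEOF_WCHAR_T here is an assumption):
 * \code
 * #if U_SIZEOF_WCHAR_T==2        // wchar_t is a 16-bit (and unsigned) type
 *     typedef wchar_t UChar;
 * #else
 *     typedef uint16_t UChar;
 * #endif
 *
 * #if U_SIZEOF_WCHAR_T==4        // wchar_t is a 32-bit type, signed or unsigned
 *     typedef wchar_t UChar32;
 * #else
 *     typedef uint32_t UChar32;
 * #endif
 * \endcode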
 *
 * utf.h also defines a number of C macros for handling single Unicode code points and
 * for using UTF Unicode strings. It includes utf8.h, utf16.h, and utf32.h for the actual
 * implementations of those macros and then aliases one set of them (for UTF-16) for general use.
 * The UTF-specific macros have the UTF size in the macro name prefixes (UTF16_...), while
 * the general alias macros always begin with UTF_...
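 *
 * For instance, a typical forward iteration over a UTF-16 string with the
 * general macros might look like this (a minimal sketch; handleCodePoint is
 * a hypothetical user function):
 * \code
 * static void enumCodePoints(const UChar *s, int32_t length) {
 *     UChar32 c;
 *     int32_t i=0;
 *     while(i<length) {
 *         UTF_NEXT_CHAR(s, i, length, c); // reads one code point, advances i by 1 or 2 code units
 *         handleCodePoint(c);             // hypothetical
 *     }
 * }
 * \endcode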
 *
 * Many string operations can be done with or without error checking.
* Where such a distinction is useful, there are two versions of the macros, "unsafe" and "safe"
* ones with ..._UNSAFE and ..._SAFE suffixes. The unsafe macros are fast but may cause
* program failures if the strings are not well-formed. The safe macros have an additional, boolean
* parameter "strict". If strict is FALSE, then only illegal sequences are detected.
* Otherwise, irregular sequences and non-characters are detected as well (like single surrogates).
* Safe macros return special error code points for illegal/irregular sequences:
* Typically, U+ffff, or values that would result in a code unit sequence of the same length
* as the erroneous input sequence.
* Note that _UNSAFE macros have fewer parameters: They do not have the strictness parameter, and
* they do not have start/length parameters for boundary checking.
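 *
 * To make the difference in signatures concrete, compare (a sketch; s is a
 * UChar string with the given length, i an index into it):
 * \code
 * // unsafe: no boundary check, no strictness parameter; s must be well-formed
 * UTF16_NEXT_CHAR_UNSAFE(s, i, c);
 *
 * // safe: boundary-checked; strict=FALSE detects only illegal sequences
 * UTF16_NEXT_CHAR_SAFE(s, i, length, c, FALSE);
 * \endcode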
 *
 * Here, the macros are aliased in two steps:
 * In the first step, the UTF-specific macros with UTF16_ prefix and _UNSAFE and _SAFE suffixes are
 * aliased according to the UTF_SIZE to macros with UTF_ prefix and the same suffixes and signatures.
 * Then, in a second step, the default, general alias macros are set to use either the unsafe or
 * the safe/not strict (default) or the safe/strict macro;
 * these general macros do not have a strictness parameter.
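 *
 * As an illustrative sketch of this two-step aliasing (simplified; the actual
 * definitions depend on UTF_SIZE and on the safety selection described below):
 * \code
 * // step 1: UTF-specific macro -> UTF_ macro with the same signature
 * #define UTF_NEXT_CHAR_SAFE(s, i, length, c, strict) UTF16_NEXT_CHAR_SAFE(s, i, length, c, strict)
 *
 * // step 2: general macro without a strictness parameter (default: safe/not strict)
 * #define UTF_NEXT_CHAR(s, i, length, c) UTF_NEXT_CHAR_SAFE(s, i, length, c, FALSE)
 * \endcode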
 *
 * It is possible to change the default choice for the general alias macros to be
 * unsafe, safe/not strict, or safe/strict by defining UTF_UNSAFE, UTF_SAFE, or UTF_STRICT, respectively.
 * The default is safe/not strict. It is not recommended to select the unsafe macros as the basis for
 * Unicode string handling in ICU!
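 *
 * For example, to base the general macros on the safe/strict variants, one
 * would presumably define the selection macro before this header is included
 * (a sketch):
 * \code
 * #define UTF_STRICT
 * #include "unicode/utypes.h" // includes utf.h
 * \endcode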
 *
 * For general use, one should use the default, general macros with UTF_ prefix and no _SAFE/_UNSAFE suffix.
 * Only in some cases may it be necessary to control the choice of macro directly and use a less generic alias.
 * For example, if it can be assumed that a string is well-formed and the index will stay within the bounds,
 * then the _UNSAFE version may be used.
 * If a UTF-8 string is to be processed, then the macros with UTF8_ prefixes need to be used.
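 *
 * For example, a bounds-checked iteration over a UTF-8 string would use the
 * UTF8_ macros directly (a minimal sketch; s is a uint8_t string with the
 * given length):
 * \code
 * UChar32 c;
 * int32_t i=0;
 * while(i<length) {
 *     UTF8_NEXT_CHAR_SAFE(s, i, length, c, FALSE);
 *     if(UTF_IS_ERROR(c)) {
 *         // malformed input; c is an error value of matching UTF-8 length
 *     }
 * }
 * \endcode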
 *
 * Usage: ICU coding guidelines for if() statements should be followed when using these macros.
 * Compound statements (curly braces {}) must be used for if-else-while...
 * bodies, and all macro statements should be terminated with a semicolon.
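 *
 * For example (a sketch, assuming s, i, length, and c are declared as above):
 * \code
 * if(i<length) {
 *     UTF_NEXT_CHAR(s, i, length, c); // braces around the body, semicolon kept
 * } else {
 *     c=UTF_ERROR_VALUE;
 * }
 * \endcode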
 */

#ifndef __UTF_H__
#define __UTF_H__

/*
 * ANSI C headers:
 * stddef.h defines wchar_t
 */
#include <stddef.h>

/**
 * UTF8_ERROR_VALUE_1 and UTF8_ERROR_VALUE_2 are special error values for UTF-8,
* which need 1 or 2 bytes in UTF-8:
* U+0015 = NAK = Negative Acknowledge, C0 control character
* U+009f = highest C1 control character
 * These are used by ("safe") UTF-8 macros so that they can return an error value
 * that needs the same number of code units (bytes) as were seen by
 * a macro. They should be tested with UTF_IS_ERROR() or UTF_IS_VALID().
 *
 * @internal
 */
#define UTF8_ERROR_VALUE_1 0x15
#define UTF8_ERROR_VALUE_2 0x9f

/**
 * Error value for all UTFs. This code point value will be set by macros with error
 * checking if an error is detected.
 */
#define UTF_ERROR_VALUE 0xffff

/* single-code point definitions -------------------------------------------- */

/** Is this code unit or code point a surrogate (U+d800..U+dfff)? */
#define UTF_IS_SURROGATE(uchar) (((uchar)&0xfffff800)==0xd800)

/**
 * Is a given 32-bit code point/Unicode scalar value
 * actually a valid Unicode (abstract) character?
 *
 * Non-characters include:
 * - single surrogate code points (U+d800..U+dfff, 2048 code points)
 * - the last two code points on each plane (U+__fffe and U+__ffff, 34 code points)
 * - U+fdd0..U+fdef (new with Unicode 3.1, 32 code points)
 * - the highest Unicode code point value is U+10ffff
 *
 * This means that all code points below U+d800 are character code points,
 * and that boundary is tested first for performance.
 */
#define UTF_IS_UNICODE_CHAR(c) \
    ((uint32_t)(c)<0xd800 || \
        ((uint32_t)(c)>0xdfff && \
         (uint32_t)(c)<=0x10ffff && \
         ((c)&0xfffe)!=0xfffe && \
         !(0xfdd0<=(uint32_t)(c) && (uint32_t)(c)<=0xfdef)))

/**
 * Is a given 32-bit code an error value
 * as returned by one of the macros for any UTF?
 */
#define UTF_IS_ERROR(c) \
    (((c)&0xfffe)==0xfffe || (c)==UTF8_ERROR_VALUE_1 || (c)==UTF8_ERROR_VALUE_2)

/** This is a combined macro: Is c a valid Unicode value _and_ not an error code? */
#define UTF_IS_VALID(c) \
    (UTF_IS_UNICODE_CHAR(c) && \
     (c)!=UTF8_ERROR_VALUE_1 && (c)!=UTF8_ERROR_VALUE_2)

/* include the utfXX.h ------------------------------------------------------ */

#include "unicode/utf8.h"
#include "unicode/utf16.h"
#include "unicode/utf32.h"

/* Define types and macros according to the selected UTF size. -------------- */

/*!
 * \var UChar
 * Define UChar to be wchar_t if that is 16 bits wide; always assumed to be unsigned.
 * If wchar_t is not 16 bits wide, then define UChar to be uint16_t.
 */
#if UTF_SIZE==8

# error UTF-8 is not implemented, undefine UTF_SIZE or define it to 16

/*
 * ANSI C header:
 * limits.h defines CHAR_MAX
 */
# include <limits.h>