Compare commits

...

29 Commits

Author SHA1 Message Date
Doug Simon
d354141aa1 8318694: [JVMCI] disable can_call_java in most contexts for libjvmci compiler threads
Reviewed-by: dholmes, never
2023-11-01 16:27:04 +00:00
Jonathan Gibbons
c86592d38d 8319046: Execute tests in source/class-file order in JavadocTester
Reviewed-by: hannesw
2023-11-01 15:48:31 +00:00
Jonathan Gibbons
3660a90ad8 8319139: Improve diagnosability of JavadocTester output
Reviewed-by: hannesw
2023-11-01 15:33:53 +00:00
Alexey Ivanov
7f47c51ace 8316025: Use testUI() method of PassFailJFrame.Builder in FileChooserSymLinkTest.java
Reviewed-by: azvegint
2023-11-01 15:27:05 +00:00
Aggelos Biboudis
36de19d462 8317048: VerifyError with unnamed pattern variable and more than one components
Reviewed-by: jlahoda
2023-11-01 13:38:10 +00:00
Albert Mingkun Yang
ab1934848b 8318647: Serial: Refactor BlockOffsetTable
Reviewed-by: tschatzl, iwalulya
2023-11-01 11:50:52 +00:00
Julian Waters
b4f5379d50 8304939: os::win32::exit_process_or_thread should be marked noreturn
Reviewed-by: dholmes, kbarrett
2023-11-01 10:42:23 +00:00
Thomas Stuefe
0461d9a7d6 8318016: Per-compilation memory ceiling
Reviewed-by: roland, thartmann
2023-11-01 08:12:39 +00:00
Jasmine Karthikeyan
2a76ad975c 8318683: compiler/c2/irTests/TestPhiDuplicatedConversion.java "Failed IR Rules (2) of Methods (2)"
Reviewed-by: thartmann, kvn
2023-11-01 06:12:55 +00:00
Jonathan Gibbons
b3fec6b5f3 8306980: Generated docs should contain correct Legal Documents
Reviewed-by: ihse, mchung
2023-10-31 22:03:35 +00:00
Calvin Cheung
11394828b3 8316132: CDSProtectionDomain::get_shared_protection_domain should check for exception
Reviewed-by: dholmes, iklam
2023-10-31 20:56:18 +00:00
Harshitha Onkar
2182c93689 8313643: Update HarfBuzz to 8.2.2
Reviewed-by: jdv, prr, dnguyen
2023-10-31 19:01:15 +00:00
Renjith Kannath Pariyangad
613a3cc689 8301846: Invalid TargetDataLine after screen lock when using JFileChooser or COM library
Reviewed-by: serb, aivanov
2023-10-31 18:20:38 +00:00
Renjith Kannath Pariyangad
613d32c282 8169475: WheelModifier.java fails by timeout
Reviewed-by: dmarkov, aivanov
2023-10-31 18:20:22 +00:00
Adam Sotona
f1e8787393 8317609: Classfile API fails to verify /jdk.jcmd/sun/tools/jstat/Alignment.class
Reviewed-by: liach, alanb
2023-10-31 16:13:41 +00:00
Daniel Jeliński
47624f6fc6 8299058: AssertionError in sun.net.httpserver.ServerImpl when connection is idle
Reviewed-by: jpai, michaelm, dfuchs
2023-10-31 15:53:28 +00:00
Erik Gahlin
2d5829afbc 8239508: JFR: @RemoveFields
Reviewed-by: mgronlun
2023-10-31 15:36:12 +00:00
Hai-May Chao
0064cf90ff 8311596: Add separate system properties for TLS server and client for maximum chain length
Reviewed-by: jnimeh, weijun, mullan
2023-10-31 15:22:18 +00:00
Conor Cleary
3a7525d5c3 8309118: HttpClient: Add more tests for 100 ExpectContinue with HTTP/2
Reviewed-by: dfuchs, djelinski
2023-10-31 14:45:14 +00:00
Albert Mingkun Yang
f4c5db92ea 8318908: Parallel: Remove ExtendedCardValue
Reviewed-by: tschatzl, sjohanss
2023-10-31 14:23:18 +00:00
Doug Simon
7452d50be5 8318940: [JVMCI] do not set HotSpotNmethod oop for a default HotSpotNmethod
Reviewed-by: thartmann, never
2023-10-31 14:16:38 +00:00
Matthew Donovan
3e39d7b34c 8319136: Skip pkcs11 tests on linux-aarch64
Reviewed-by: rhalade
2023-10-31 13:28:41 +00:00
Andrew Haley
ee6f25b507 8319120: Unbound ScopedValue.get() throws the wrong exception
Reviewed-by: alanb
2023-10-31 12:58:56 +00:00
Viktor Klang
e05cafda78 8318467: [jmh] tests concurrent.Queues and concurrent.ProducerConsumer hang with 101+ threads
Reviewed-by: alanb
2023-10-31 11:14:08 +00:00
Markus Grönlund
d3c3f0e7c8 8317951: Refactor loading of zip library to help resolve JDK-8315220
Reviewed-by: egahlin, dholmes
2023-10-31 10:02:40 +00:00
Conor Cleary
576c9bccfb 8318492: Http2ClientImpl should attempt to close and remove connection in stop()
Reviewed-by: dfuchs, jpai
2023-10-31 09:58:26 +00:00
Albert Mingkun Yang
5411ad2a5c 8319106: Remove unimplemented TaskTerminator::do_delay_step
Reviewed-by: tschatzl
2023-10-31 09:14:57 +00:00
Jayathirth D V
75ce02fe74 8318951: Additional negative value check in JPEG decoding
Reviewed-by: azvegint, prr
2023-10-31 04:20:23 +00:00
Andrew John Hughes
328b381075 8009550: PlatformPCSC should load versioned so
Reviewed-by: valeriep, stuefe
2023-10-31 02:06:41 +00:00
317 changed files with 14262 additions and 7367 deletions

View File

@@ -1053,6 +1053,9 @@ else
# All modules include the main license files from java.base.
$(JMOD_TARGETS): java.base-copy
# jdk.javadoc uses an internal copy of the main license files from java.base.
jdk.javadoc-copy: java.base-copy
zip-security: $(filter jdk.crypto%, $(JAVA_TARGETS))
ifeq ($(ENABLE_GENERATE_CLASSLIST), true)

View File

@@ -495,6 +495,11 @@ else
# hb-ft.cc is not presently needed, and requires freetype 2.4.2 or later.
LIBFONTMANAGER_EXCLUDE_FILES += libharfbuzz/hb-ft.cc
# list of disabled warnings and the compilers for which it was specifically added.
# array-bounds -> GCC 12 on Alpine Linux
# parentheses -> GCC 6
# range-loop-analysis -> clang on Xcode12
HARFBUZZ_DISABLED_WARNINGS_gcc := missing-field-initializers strict-aliasing \
unused-result array-bounds parentheses
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.

View File

@@ -0,0 +1,47 @@
#
# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include CopyCommon.gmk
JDK_JAVADOC_DIR := $(JDK_OUTPUTDIR)/modules/jdk.javadoc
JDK_JAVADOC_DOCLET_RESOURCE_DIR := $(JDK_JAVADOC_DIR)/jdk/javadoc/internal/doclets/formats/html/resources
################################################################################
$(eval $(call SetupCopyFiles, COPY_JAVADOC_MODULE_LEGAL_RESOURCES, \
DEST := $(JDK_JAVADOC_DOCLET_RESOURCE_DIR)/legal, \
FILES := $(wildcard $(MODULE_SRC)/share/legal/*.md), \
))
TARGETS += $(COPY_JAVADOC_MODULE_LEGAL_RESOURCES)
################################################################################
$(eval $(call SetupCopyFiles, COPY_JAVADOC_COMMON_LEGAL_RESOURCES, \
DEST := $(JDK_JAVADOC_DOCLET_RESOURCE_DIR)/legal, \
FILES := $(wildcard $(COMMON_LEGAL_DST_DIR)/*), \
))
TARGETS += $(COPY_JAVADOC_COMMON_LEGAL_RESOURCES)
################################################################################

View File

@@ -505,12 +505,17 @@ struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
return nullptr;
}
enum Ept { EPT_THREAD, EPT_PROCESS, EPT_PROCESS_DIE };
// Wrapper around _endthreadex(), exit() and _exit()
[[noreturn]]
static void exit_process_or_thread(Ept what, int code);
JNIEXPORT
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
// Thread start routine for all newly created threads.
// Called with the associated Thread* as the argument.
- unsigned __stdcall os::win32::thread_native_entry(void* t) {
+ static unsigned __stdcall thread_native_entry(void* t) {
Thread* thread = static_cast<Thread*>(t);
thread->record_stack_base_and_size();
@@ -558,7 +563,8 @@ unsigned __stdcall os::win32::thread_native_entry(void* t) {
// Thread must not return from exit_process_or_thread(), but if it does,
// let it proceed to exit normally
- return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
+ exit_process_or_thread(EPT_THREAD, res);
+ return res;
}
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
@@ -745,7 +751,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
thread_handle =
(HANDLE)_beginthreadex(nullptr,
(unsigned)stack_size,
- &os::win32::thread_native_entry,
+ &thread_native_entry,
thread,
initflag,
&thread_id);
@@ -1202,7 +1208,7 @@ void os::abort(bool dump_core, void* siginfo, const void* context) {
if (dumpFile != nullptr) {
CloseHandle(dumpFile);
}
- win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
+ exit_process_or_thread(EPT_PROCESS, 1);
}
dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
@@ -1226,12 +1232,12 @@ void os::abort(bool dump_core, void* siginfo, const void* context) {
jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
}
CloseHandle(dumpFile);
- win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
+ exit_process_or_thread(EPT_PROCESS, 1);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
- win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
+ exit_process_or_thread(EPT_PROCESS_DIE, -1);
}
void os::dll_unload(void *lib) {
@@ -4097,7 +4103,7 @@ static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
return TRUE;
}
- int os::win32::exit_process_or_thread(Ept what, int exit_code) {
+ static void exit_process_or_thread(Ept what, int exit_code) {
// Basic approach:
// - Each exiting thread registers its intent to exit and then does so.
// - A thread trying to terminate the process must wait for all
@@ -4275,7 +4281,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
}
// Should not reach here
- return exit_code;
+ os::infinite_sleep();
}
#undef EXIT_TIMEOUT
@@ -4853,11 +4859,11 @@ ssize_t os::pd_write(int fd, const void *buf, size_t nBytes) {
}
void os::exit(int num) {
- win32::exit_process_or_thread(win32::EPT_PROCESS, num);
+ exit_process_or_thread(EPT_PROCESS, num);
}
void os::_exit(int num) {
- win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, num);
+ exit_process_or_thread(EPT_PROCESS_DIE, num);
}
// Is a (classpath) directory empty?

View File

@@ -70,13 +70,6 @@ class os::win32 {
static HINSTANCE load_Windows_dll(const char* name, char *ebuf, int ebuflen);
private:
- // The handler passed to _beginthreadex().
- // Called with the associated Thread* as the argument.
- static unsigned __stdcall thread_native_entry(void*);
- enum Ept { EPT_THREAD, EPT_PROCESS, EPT_PROCESS_DIE };
- // Wrapper around _endthreadex(), exit() and _exit()
- static int exit_process_or_thread(Ept what, int exit_code);
static void initialize_performance_counter();

View File

@@ -71,4 +71,5 @@ void VMError::raise_fail_fast(void* exrecord, void* context) {
RaiseFailFastException(static_cast<PEXCEPTION_RECORD>(exrecord),
static_cast<PCONTEXT>(context),
flags);
os::infinite_sleep();
}
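As an aside, a minimal standalone sketch (not JDK code) of the [[noreturn]] pattern the hunks above rely on: a function declared [[noreturn]] must never return, so unreachable tails are replaced with an infinite sleep instead of a return statement.

#include <chrono>
#include <cstdlib>
#include <thread>

// Hypothetical stand-in for os::infinite_sleep(): loops forever.
[[noreturn]] static void infinite_sleep() {
  for (;;) {
    std::this_thread::sleep_for(std::chrono::hours(24));
  }
}

[[noreturn]] static void exit_process_or_hang(int code) {
  if (code >= 0) {
    std::exit(code);  // never returns
  }
  // Should not reach here; a [[noreturn]] function must not fall off
  // the end, so hang instead of returning.
  infinite_sleep();
}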

View File

@@ -397,6 +397,7 @@ int Compilation::compile_java_method() {
PhaseTraceTime timeit(_t_buildIR);
build_hir();
}
CHECK_BAILOUT_(no_frame_size);
if (BailoutAfterHIR) {
BAILOUT_("Bailing out because of -XX:+BailoutAfterHIR", no_frame_size);
}
@@ -446,13 +447,13 @@ void Compilation::install_code(int frame_size) {
void Compilation::compile_method() {
- CompilationMemoryStatisticMark cmsm(env()->task()->directive());
{
PhaseTraceTime timeit(_t_setup);
// setup compilation
initialize();
CHECK_BAILOUT();
}
if (!method()->can_be_compiled()) {
@@ -605,6 +606,9 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
_cfg_printer_output = new CFGPrinterOutput(this);
}
#endif
CompilationMemoryStatisticMark cmsm(directive);
compile_method();
if (bailed_out()) {
_env->record_method_not_compilable(bailout_msg());

View File

@@ -85,6 +85,7 @@ class Compilation: public StackObj {
bool _has_monitors; // Fastpath monitors detection for Continuations
bool _install_code;
const char* _bailout_msg;
bool _oom;
ExceptionInfoList* _exception_info_list;
ExceptionHandlerTable _exception_handler_table;
ImplicitExceptionTable _implicit_exception_table;
@@ -203,6 +204,10 @@ class Compilation: public StackObj {
}
#endif // PRODUCT
// MemLimit handling
bool oom() const { return _oom; }
void set_oom() { _oom = true; }
// error handling
void bailout(const char* msg);
bool bailed_out() const { return _bailout_msg != nullptr; }

View File

@@ -241,7 +241,7 @@ Handle CDSProtectionDomain::get_shared_protection_domain(Handle class_loader,
TRAPS) {
Handle protection_domain;
if (shared_protection_domain(shared_path_index) == nullptr) {
- Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
+ Handle pd = get_protection_domain_from_classloader(class_loader, url, CHECK_NH);
atomic_set_shared_protection_domain(shared_path_index, pd());
}

View File

@@ -319,10 +319,10 @@ public:
// This is true if the compilation is not going to produce code.
// (It is reasonable to retry failed compilations.)
- bool failing() { return _failure_reason != nullptr; }
+ bool failing() const { return _failure_reason != nullptr; }
// Reason this compilation is failing, such as "too many basic blocks".
- const char* failure_reason() { return _failure_reason; }
+ const char* failure_reason() const { return _failure_reason; }
// Return state of appropriate compatibility
int compilable() { return _compilable; }

View File

@@ -86,22 +86,6 @@ typedef int (*canonicalize_fn_t)(const char *orig, char *out, int len);
static canonicalize_fn_t CanonicalizeEntry = nullptr;
- // Entry points in zip.dll for loading zip/jar file entries
- typedef void * * (*ZipOpen_t)(const char *name, char **pmsg);
- typedef void (*ZipClose_t)(jzfile *zip);
- typedef jzentry* (*FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
- typedef jboolean (*ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
- typedef jint (*Crc32_t)(jint crc, const jbyte *buf, jint len);
- static ZipOpen_t ZipOpen = nullptr;
- static ZipClose_t ZipClose = nullptr;
- static FindEntry_t FindEntry = nullptr;
- static ReadEntry_t ReadEntry = nullptr;
- static Crc32_t Crc32 = nullptr;
- int ClassLoader::_libzip_loaded = 0;
- void* ClassLoader::_zip_handle = nullptr;
// Entry points for jimage.dll for loading jimage file entries
static JImageOpen_t JImageOpen = nullptr;
@@ -292,7 +276,7 @@ ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name,
}
ClassPathZipEntry::~ClassPathZipEntry() {
- (*ZipClose)(_zip);
+ ZipLibrary::close(_zip);
FREE_C_HEAP_ARRAY(char, _zip_name);
}
@@ -301,7 +285,7 @@ u1* ClassPathZipEntry::open_entry(JavaThread* current, const char* name, jint* f
ThreadToNativeFromVM ttn(current);
// check whether zip archive contains name
jint name_len;
- jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
+ jzentry* entry = ZipLibrary::find_entry(_zip, name, filesize, &name_len);
if (entry == nullptr) return nullptr;
u1* buffer;
char name_buf[128];
@@ -321,7 +305,9 @@ u1* ClassPathZipEntry::open_entry(JavaThread* current, const char* name, jint* f
size++;
}
buffer = NEW_RESOURCE_ARRAY(u1, size);
- if (!(*ReadEntry)(_zip, entry, buffer, filename)) return nullptr;
+ if (!ZipLibrary::read_entry(_zip, entry, buffer, filename)) {
+ return nullptr;
+ }
// return result
if (nul_terminate) {
@@ -724,8 +710,7 @@ jzfile* ClassLoader::open_zip_file(const char* canonical_path, char** error_msg,
// enable call to C land
ThreadToNativeFromVM ttn(thread);
HandleMark hm(thread);
- load_zip_library_if_needed();
- return (*ZipOpen)(canonical_path, error_msg);
+ return ZipLibrary::open(canonical_path, error_msg);
}
ClassPathEntry* ClassLoader::create_class_path_entry(JavaThread* current,
@@ -937,32 +922,6 @@ void ClassLoader::load_java_library() {
CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, dll_lookup(javalib_handle, "JDK_Canonicalize", nullptr));
}
- void ClassLoader::release_load_zip_library() {
- ConditionalMutexLocker locker(Zip_lock, Zip_lock != nullptr, Monitor::_no_safepoint_check_flag);
- if (_libzip_loaded == 0) {
- load_zip_library();
- Atomic::release_store(&_libzip_loaded, 1);
- }
- }
- void ClassLoader::load_zip_library() {
- assert(ZipOpen == nullptr, "should not load zip library twice");
- char path[JVM_MAXPATHLEN];
- char ebuf[1024];
- if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
- _zip_handle = os::dll_load(path, ebuf, sizeof ebuf);
- }
- if (_zip_handle == nullptr) {
- vm_exit_during_initialization("Unable to load zip library", path);
- }
- ZipOpen = CAST_TO_FN_PTR(ZipOpen_t, dll_lookup(_zip_handle, "ZIP_Open", path));
- ZipClose = CAST_TO_FN_PTR(ZipClose_t, dll_lookup(_zip_handle, "ZIP_Close", path));
- FindEntry = CAST_TO_FN_PTR(FindEntry_t, dll_lookup(_zip_handle, "ZIP_FindEntry", path));
- ReadEntry = CAST_TO_FN_PTR(ReadEntry_t, dll_lookup(_zip_handle, "ZIP_ReadEntry", path));
- Crc32 = CAST_TO_FN_PTR(Crc32_t, dll_lookup(_zip_handle, "ZIP_CRC32", path));
- }
void ClassLoader::load_jimage_library() {
assert(JImageOpen == nullptr, "should not load jimage library twice");
char path[JVM_MAXPATHLEN];
@@ -982,8 +941,7 @@ void ClassLoader::load_jimage_library() {
}
int ClassLoader::crc32(int crc, const char* buf, int len) {
- load_zip_library_if_needed();
- return (*Crc32)(crc, (const jbyte*)buf, len);
+ return ZipLibrary::crc32(crc, (const jbyte*)buf, len);
}
oop ClassLoader::get_system_package(const char* name, TRAPS) {
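Stepping outside the diff for a moment: the hunks in this file (and the two headers below) replace ad-hoc function pointers with a ZipLibrary facade. A hedged, standalone sketch of that pattern (POSIX dlopen, a hypothetical library name, and only the ZIP_CRC32 entry point from the diff above; not the actual zipLibrary.hpp):

#include <dlfcn.h>
#include <cstdio>
#include <cstdlib>
#include <mutex>

class ZipFacade {
  using crc32_fn = int (*)(int, const unsigned char*, int);
  static inline void* _handle = nullptr;
  static inline crc32_fn _crc32 = nullptr;
  static inline std::once_flag _once;

  static void load() {
    _handle = dlopen("libzip.so", RTLD_LAZY);  // hypothetical library name
    if (_handle == nullptr) {
      std::fprintf(stderr, "unable to load zip library\n");
      std::abort();
    }
    _crc32 = reinterpret_cast<crc32_fn>(dlsym(_handle, "ZIP_CRC32"));
  }

 public:
  static int crc32(int crc, const unsigned char* buf, int len) {
    std::call_once(_once, load);  // thread-safe lazy load, exactly once
    return _crc32(crc, buf, len);
  }
};

The design point mirrors the refactoring: callers such as ClassLoader::crc32 no longer juggle handles, load flags, and raw pointers; they call one typed entry point and the facade owns the loading protocol.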

View File

@@ -30,6 +30,7 @@
#include "runtime/perfDataTypes.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/zipLibrary.hpp"
// The VM class loader.
#include <sys/stat.h>
@@ -84,19 +85,6 @@ class ClassPathDirEntry: public ClassPathEntry {
ClassFileStream* open_stream(JavaThread* current, const char* name);
};
- // Type definitions for zip file and zip file entry
- typedef void* jzfile;
- typedef struct {
- char *name; /* entry name */
- jlong time; /* modification time */
- jlong size; /* size of uncompressed data */
- jlong csize; /* size of compressed data (zero if uncompressed) */
- jint crc; /* crc of uncompressed data */
- char *comment; /* optional zip file comment */
- jbyte *extra; /* optional extra data */
- jlong pos; /* position of LOC header (if negative) or data */
- } jzentry;
class ClassPathZipEntry: public ClassPathEntry {
private:
jzfile* _zip; // The zip archive
@@ -227,8 +215,6 @@ class ClassLoader: AllStatic {
CDS_ONLY(static void add_to_module_path_entries(const char* path,
ClassPathEntry* entry);)
- // cache the zip library handle
- static void* _zip_handle;
public:
CDS_ONLY(static ClassPathEntry* app_classpath_entries() {return _app_classpath_entries;})
CDS_ONLY(static ClassPathEntry* module_path_entries() {return _module_path_entries;})
@@ -247,16 +233,10 @@ class ClassLoader: AllStatic {
static void* dll_lookup(void* lib, const char* name, const char* path);
static void load_java_library();
- static void load_zip_library();
static void load_jimage_library();
- private:
- static int _libzip_loaded; // used to sync loading zip.
- static void release_load_zip_library();
- public:
- static inline void load_zip_library_if_needed();
- static void* zip_library_handle() { return _zip_handle; }
+ static void* zip_library_handle();
static jzfile* open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread);
static ClassPathEntry* create_class_path_entry(JavaThread* current,
const char *path, const struct stat* st,

View File

@@ -58,12 +58,6 @@ inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
}
}
- inline void ClassLoader::load_zip_library_if_needed() {
- if (Atomic::load_acquire(&_libzip_loaded) == 0) {
- release_load_zip_library();
- }
- }
#if INCLUDE_CDS
// Helper function used by CDS code to get the number of boot classpath

View File

@@ -2453,7 +2453,7 @@ void java_lang_Throwable::print_stack_trace(Handle throwable, outputStream* st)
BacktraceElement bte = iter.next(THREAD);
print_stack_element_to_stream(st, bte._mirror, bte._method_id, bte._version, bte._bci, bte._name);
}
- {
+ if (THREAD->can_call_java()) {
// Call getCause() which doesn't necessarily return the _cause field.
ExceptionMark em(THREAD);
JavaValue cause(T_OBJECT);
@@ -2475,6 +2475,9 @@ void java_lang_Throwable::print_stack_trace(Handle throwable, outputStream* st)
st->cr();
}
}
} else {
st->print_raw_cr("<<cannot call Java to get cause>>");
return;
}
}
}
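The shape of the change above, as a tiny standalone sketch (simplified types, not HotSpot code): threads that cannot execute Java upcalls take a degraded-but-safe branch instead of crashing or asserting.

#include <cstdio>

struct Thread {
  bool can_call_java;  // false for most compiler threads after this change
};

static void print_cause(const Thread& t) {
  if (t.can_call_java) {
    // A Java upcall (Throwable.getCause()) would happen here.
    std::printf("cause: ...\n");
  } else {
    std::printf("<<cannot call Java to get cause>>\n");
  }
}

int main() {
  print_cause(Thread{true});
  print_cause(Thread{false});
}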

View File

@@ -611,7 +611,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
InstanceKlass* loaded_class = nullptr;
SymbolHandle superclassname; // Keep alive while loading in parallel thread.
- assert(THREAD->can_call_java(),
+ guarantee(THREAD->can_call_java(),
"can not load classes with compiler thread: class=%s, classloader=%s",
name->as_C_string(),
class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string());
@@ -2056,7 +2056,7 @@ Method* SystemDictionary::find_method_handle_invoker(Klass* klass,
Klass* accessing_klass,
Handle* appendix_result,
TRAPS) {
- assert(THREAD->can_call_java() ,"");
+ guarantee(THREAD->can_call_java(), "");
Handle method_type =
SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_NULL);
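For context on the assert -> guarantee changes above, a simplified standalone sketch (MY_* stand-ins, not the real HotSpot macros): assert-style checks compile away in product builds, guarantee-style checks remain enforced everywhere.

#include <cstdio>
#include <cstdlib>

#ifdef ASSERT_ENABLED
#define MY_ASSERT(cond, msg) \
  do { if (!(cond)) { std::fprintf(stderr, "assert failed: %s\n", msg); std::abort(); } } while (0)
#else
#define MY_ASSERT(cond, msg) ((void)0)  // vanishes in product builds
#endif

// Always checked, even in product builds.
#define MY_GUARANTEE(cond, msg) \
  do { if (!(cond)) { std::fprintf(stderr, "guarantee failed: %s\n", msg); std::abort(); } } while (0)

int main() {
  MY_ASSERT(1 + 1 == 2, "arithmetic");
  MY_GUARANTEE(1 + 1 == 2, "arithmetic");
}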

View File

@@ -26,6 +26,9 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#endif
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilerDirectives.hpp"
@@ -42,15 +45,16 @@
#endif
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
#include "utilities/resourceHash.hpp"
ArenaStatCounter::ArenaStatCounter() :
_current(0), _start(0), _peak(0),
_na(0), _ra(0),
_limit(0), _hit_limit(false),
_na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0)
{}
@@ -58,8 +62,15 @@ size_t ArenaStatCounter::peak_since_start() const {
return _peak > _start ? _peak - _start : 0;
}
- void ArenaStatCounter::start() {
+ void ArenaStatCounter::start(size_t limit) {
_peak = _start = _current;
_limit = limit;
_hit_limit = false;
}
void ArenaStatCounter::end(){
_limit = 0;
_hit_limit = false;
}
void ArenaStatCounter::update_c2_node_count() {
@@ -104,6 +115,10 @@ bool ArenaStatCounter::account(ssize_t delta, int tag) {
_ra_at_peak = _ra;
update_c2_node_count();
rc = true;
// Did we hit the memory limit?
if (!_hit_limit && _limit > 0 && peak_since_start() > _limit) {
_hit_limit = true;
}
}
return rc;
}
@@ -125,7 +140,8 @@ class FullMethodName {
public:
FullMethodName(Symbol* k, Symbol* m, Symbol* s) : _k(k), _m(m), _s(s) {}
FullMethodName(const Method* m) :
_k(m->klass_name()), _m(m->name()), _s(m->signature()) {};
FullMethodName(const FullMethodName& o) : _k(o._k), _m(o._m), _s(o._s) {}
void make_permanent() {
@@ -173,13 +189,15 @@ class MemStatEntry : public CHeapObj<mtInternal> {
size_t _na_at_peak;
size_t _ra_at_peak;
unsigned _live_nodes_at_peak;
const char* _result;
public:
MemStatEntry(FullMethodName method)
: _method(method), _comptype(compiler_c1),
_time(0), _num_recomp(0), _thread(nullptr),
- _total(0), _na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0) {
+ _total(0), _na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0),
+ _result(nullptr) {
}
void set_comptype(CompilerType comptype) { _comptype = comptype; }
@@ -192,6 +210,8 @@ public:
void set_ra_at_peak(size_t n) { _ra_at_peak = n; }
void set_live_nodes_at_peak(unsigned n) { _live_nodes_at_peak = n; }
void set_result(const char* s) { _result = s; }
size_t total() const { return _total; }
static void print_legend(outputStream* st) {
@@ -199,7 +219,8 @@ public:
st->print_cr(" total : memory allocated via arenas while compiling");
st->print_cr(" NA : ...how much in node arenas (if c2)");
st->print_cr(" RA : ...how much in resource areas");
st->print_cr(" #nodes : ...how many nodes (if c2)");
st->print_cr(" result : Result: 'ok' finished successfully, 'oom' hit memory limit, 'err' compilation failed");
st->print_cr(" #nodes : ...how many nodes (c2 only)");
st->print_cr(" time : time of last compilation (sec)");
st->print_cr(" type : compiler type");
st->print_cr(" #rc : how often recompiled");
@@ -207,7 +228,7 @@ public:
}
static void print_header(outputStream* st) {
st->print_cr("total NA RA #nodes time type #rc thread method");
st->print_cr("total NA RA result #nodes time type #rc thread method");
}
void print_on(outputStream* st, bool human_readable) const {
@@ -237,6 +258,10 @@ public:
}
col += 10; st->fill_to(col);
// result?
st->print("%s ", _result ? _result : "");
col += 8; st->fill_to(col);
// Number of Nodes when memory peaked
st->print("%u ", _live_nodes_at_peak);
col += 8; st->fill_to(col);
@@ -281,7 +306,7 @@ public:
void add(const FullMethodName& fmn, CompilerType comptype,
size_t total, size_t na_at_peak, size_t ra_at_peak,
- unsigned live_nodes_at_peak) {
+ unsigned live_nodes_at_peak, const char* result) {
assert_lock_strong(NMTCompilationCostHistory_lock);
MemStatEntry** pe = get(fmn);
@@ -302,6 +327,7 @@ public:
e->set_na_at_peak(na_at_peak);
e->set_ra_at_peak(ra_at_peak);
e->set_live_nodes_at_peak(live_nodes_at_peak);
e->set_result(result);
}
// Returns a C-heap-allocated SortMe array containing all entries from the table,
@@ -341,20 +367,21 @@ void CompilationMemoryStatistic::initialize() {
log_info(compilation, alloc)("Compilation memory statistic enabled");
}
- void CompilationMemoryStatistic::on_start_compilation() {
+ void CompilationMemoryStatistic::on_start_compilation(const DirectiveSet* directive) {
assert(enabled(), "Not enabled?");
- Thread::current()->as_Compiler_thread()->arena_stat()->start();
+ const size_t limit = directive->mem_limit();
+ Thread::current()->as_Compiler_thread()->arena_stat()->start(limit);
}
void CompilationMemoryStatistic::on_end_compilation() {
assert(enabled(), "Not enabled?");
ResourceMark rm;
CompilerThread* const th = Thread::current()->as_Compiler_thread();
- const ArenaStatCounter* const arena_stat = th->arena_stat();
+ ArenaStatCounter* const arena_stat = th->arena_stat();
const CompilerType ct = th->task()->compiler()->type();
const Method* const m = th->task()->method();
- FullMethodName fmn(m->klass_name(), m->name(), m->signature());
+ FullMethodName fmn(m);
fmn.make_permanent();
const DirectiveSet* directive = th->task()->directive();
@@ -368,6 +395,20 @@ void CompilationMemoryStatistic::on_end_compilation() {
arena_stat->print_on(tty);
tty->cr();
}
// Store result
// For this to work, we must call on_end_compilation() at a point where
// Compile|Compilation already handed over the failure string to ciEnv,
// but ciEnv must still be alive.
const char* result = "ok"; // ok
const ciEnv* const env = th->env();
if (env) {
const char* const failure_reason = env->failure_reason();
if (failure_reason != nullptr) {
result = (failure_reason == failure_reason_memlimit()) ? "oom" : "err";
}
}
{
MutexLocker ml(NMTCompilationCostHistory_lock, Mutex::_no_safepoint_check_flag);
assert(_the_table != nullptr, "not initialized");
@@ -376,14 +417,105 @@ void CompilationMemoryStatistic::on_end_compilation() {
arena_stat->peak_since_start(), // total
arena_stat->na_at_peak(),
arena_stat->ra_at_peak(),
arena_stat->live_nodes_at_peak());
arena_stat->live_nodes_at_peak(),
result);
}
arena_stat->end(); // reset things
}
static void inform_compilation_about_oom(CompilerType ct) {
// Inform C1 or C2 that an OOM happened. They will take delayed action
// and abort the compilation in progress. Note that this is not instantaneous,
// since the compiler has to actively bailout, which may take a while, during
// which memory usage may rise further.
//
// The mechanism differs slightly between C1 and C2:
// - With C1, we directly set the bailout string, which will cause C1 to
// bailout at the typical BAILOUT places.
// - With C2, the corresponding mechanism would be the failure string; but
// bailout paths in C2 are not complete and therefore it is dangerous to
// set the failure string at - for C2 - seemingly random places. Instead,
// upon OOM C2 sets the failure string next time it checks the node limit.
if (ciEnv::current() != nullptr) {
void* compiler_data = ciEnv::current()->compiler_data();
#ifdef COMPILER1
if (ct == compiler_c1) {
Compilation* C = static_cast<Compilation*>(compiler_data);
if (C != nullptr) {
C->bailout(CompilationMemoryStatistic::failure_reason_memlimit());
C->set_oom();
}
}
#endif
#ifdef COMPILER2
if (ct == compiler_c2) {
Compile* C = static_cast<Compile*>(compiler_data);
if (C != nullptr) {
C->set_oom();
}
}
#endif // COMPILER2
}
}
void CompilationMemoryStatistic::on_arena_change(ssize_t diff, const Arena* arena) {
assert(enabled(), "Not enabled?");
CompilerThread* const th = Thread::current()->as_Compiler_thread();
- th->arena_stat()->account(diff, (int)arena->get_tag());
+ ArenaStatCounter* const arena_stat = th->arena_stat();
+ bool hit_limit_before = arena_stat->hit_limit();
+ if (arena_stat->account(diff, (int)arena->get_tag())) { // new peak?
// Limit handling
if (arena_stat->hit_limit()) {
char name[1024] = "";
bool print = false;
bool crash = false;
CompilerType ct = compiler_none;
// get some more info
const CompileTask* task = th->task();
if (task != nullptr) {
ct = task->compiler()->type();
const DirectiveSet* directive = task->directive();
print = directive->should_print_memstat();
crash = directive->should_crash_at_mem_limit();
const Method* m = th->task()->method();
if (m != nullptr) {
FullMethodName(m).as_C_string(name, sizeof(name));
}
}
char message[1024] = "";
// build up message if we need it later
if (print || crash) {
stringStream ss(message, sizeof(message));
if (ct != compiler_none && name[0] != '\0') {
ss.print("%s %s: ", compilertype2name(ct), name);
}
ss.print("Hit MemLimit %s (limit: %zu now: %zu)",
(hit_limit_before ? "again" : ""),
arena_stat->limit(), arena_stat->peak_since_start());
}
// log if needed
if (print) {
tty->print_raw(message);
tty->cr();
}
// Crash out if needed
if (crash) {
report_fatal(OOM_HOTSPOT_ARENA, __FILE__, __LINE__, "%s", message);
} else {
inform_compilation_about_oom(ct);
}
}
}
}
static inline ssize_t diff_entries_by_size(const MemStatEntry* e1, const MemStatEntry* e2) {
@@ -438,10 +570,15 @@ void CompilationMemoryStatistic::print_all_by_size(outputStream* st, bool human_
FREE_C_HEAP_ARRAY(Entry, filtered);
}
const char* CompilationMemoryStatistic::failure_reason_memlimit() {
static const char* const s = "hit memory limit while compiling";
return s;
}
CompilationMemoryStatisticMark::CompilationMemoryStatisticMark(const DirectiveSet* directive)
: _active(directive->should_collect_memstat()) {
if (_active) {
- CompilationMemoryStatistic::on_start_compilation();
+ CompilationMemoryStatistic::on_start_compilation(directive);
}
}
CompilationMemoryStatisticMark::~CompilationMemoryStatisticMark() {

View File

@@ -47,6 +47,9 @@ class ArenaStatCounter : public CHeapObj<mtCompiler> {
size_t _na;
// Current bytes used for resource areas
size_t _ra;
// MemLimit handling
size_t _limit;
bool _hit_limit;
// Peak composition:
// Size of node arena when total peaked (c2 only)
@@ -69,15 +72,20 @@ public:
size_t ra_at_peak() const { return _ra_at_peak; }
unsigned live_nodes_at_peak() const { return _live_nodes_at_peak; }
- // Mark the start of a compilation.
- void start();
+ // Mark the start and end of a compilation.
+ void start(size_t limit);
+ void end();
// Account an arena allocation or de-allocation.
// Returns true if new peak reached
bool account(ssize_t delta, int tag);
void set_live_nodes_at_peak(unsigned i) { _live_nodes_at_peak = i; }
void print_on(outputStream* st) const;
size_t limit() const { return _limit; }
bool hit_limit() const { return _hit_limit; }
};
class CompilationMemoryStatistic : public AllStatic {
@@ -86,10 +94,16 @@ public:
static void initialize();
// true if CollectMemStat or PrintMemStat has been enabled for any method
static bool enabled() { return _enabled; }
- static void on_start_compilation();
+ static void on_start_compilation(const DirectiveSet* directive);
// Called at end of compilation. Records the arena usage peak. Also takes over
// status information from ciEnv (compilation failed, oom'ed or went okay). ciEnv::_failure_reason
// must be set at this point (so place CompilationMemoryStatisticMark correctly).
static void on_end_compilation();
static void on_arena_change(ssize_t diff, const Arena* arena);
static void print_all_by_size(outputStream* st, bool human_readable, size_t minsize);
// For compilers
static const char* failure_reason_memlimit();
};
// RAII object to wrap one compilation
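That RAII wrapper is what ties start(limit) and end() together around a compilation. A hedged, standalone sketch of the same idea (simplified names, not the JDK types):

#include <cstddef>

struct StatCounter {
  size_t limit = 0;
  bool active = false;
  void start(size_t l) { limit = l; active = true; }
  void end()           { limit = 0; active = false; }
};

class StatMark {
  StatCounter& _counter;
  bool _active;
 public:
  StatMark(StatCounter& c, size_t limit, bool enabled)
      : _counter(c), _active(enabled) {
    if (_active) _counter.start(limit);
  }
  ~StatMark() {
    if (_active) _counter.end();  // runs on every exit path
  }
};

int main() {
  StatCounter c;
  {
    StatMark mark(c, 20u * 1024 * 1024, /*enabled=*/true);
    // ... a compilation would run here with accounting active ...
  }                                 // destructor resets the counter
  return c.active ? 1 : 0;          // 0: accounting was properly ended
}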

View File

@@ -142,7 +142,7 @@ void CompileTask::initialize(int compile_id,
/**
* Returns the compiler for this task.
*/
- AbstractCompiler* CompileTask::compiler() {
+ AbstractCompiler* CompileTask::compiler() const {
return CompileBroker::compiler(_comp_level);
}

View File

@@ -180,7 +180,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
int comp_level() { return _comp_level;}
void set_comp_level(int comp_level) { _comp_level = comp_level;}
- AbstractCompiler* compiler();
+ AbstractCompiler* compiler() const;
CompileTask* select_for_compilation();
int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }

View File

@@ -203,13 +203,24 @@ bool DirectiveSet::is_c2(CompilerDirectives* directive) const {
}
bool DirectiveSet::should_collect_memstat() const {
- return MemStatOption > 0;
+ // MemLimit requires the memory statistic to be active
+ return MemStatOption > 0 || MemLimitOption != 0;
}
bool DirectiveSet::should_print_memstat() const {
return MemStatOption == (uintx)MemStatAction::print;
}
size_t DirectiveSet::mem_limit() const {
return MemLimitOption < 0 ? -MemLimitOption : MemLimitOption;
}
bool DirectiveSet::should_crash_at_mem_limit() const {
// The sign encodes the action to be taken when reaching
// the memory limit (+ stop - crash)
return MemLimitOption < 0;
}
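A standalone illustration (assumed helper names, mirroring the sign convention documented in the two accessors above) of packing both a size and an action into one signed option value:

#include <cstddef>
#include <cstdio>

static size_t mem_limit(long option)      { return option < 0 ? (size_t)(-option) : (size_t)option; }
static bool   crash_at_limit(long option) { return option < 0; }

int main() {
  long opts[] = { 20L * 1024 * 1024,       // "memlimit,*.*,20m"      -> stop at 20 MB
                  -(1L * 1024 * 1024) };   // "memlimit,...,1m~crash" -> crash at 1 MB
  for (long o : opts) {
    std::printf("limit=%zu bytes, action=%s\n",
                mem_limit(o), crash_at_limit(o) ? "crash" : "stop");
  }
}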
// In the list of Control/disabled intrinsics, the ID of the control intrinsics can be separated:
// - by ',' (if -XX:Control/DisableIntrinsic is used once when invoking the VM) or
// - by '\n' (if -XX:Control/DisableIntrinsic is used multiple times when invoking the VM) or

View File

@@ -41,6 +41,7 @@
cflags(BreakAtExecute, bool, false, BreakAtExecute) \
cflags(BreakAtCompile, bool, false, BreakAtCompile) \
cflags(Log, bool, LogCompilation, Unknown) \
cflags(MemLimit, intx, 0, MemLimit) \
cflags(MemStat, uintx, 0, MemStat) \
cflags(PrintAssembly, bool, PrintAssembly, PrintAssembly) \
cflags(PrintCompilation, bool, PrintCompilation, PrintCompilation) \
@@ -150,6 +151,8 @@ public:
bool is_c2(CompilerDirectives* directive) const;
bool should_collect_memstat() const;
bool should_print_memstat() const;
size_t mem_limit() const;
bool should_crash_at_mem_limit() const; // true: crash false: stop compilation
typedef enum {
#define enum_of_flags(name, type, dvalue, cc_flag) name##Index,

View File

@@ -39,6 +39,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "utilities/parseInteger.hpp"
static const char* optiontype_names[] = {
#define enum_of_types(type, name) name,
@@ -459,7 +460,7 @@ bool CompilerOracle::should_print_methods() {
// Tells whether there are any methods to collect memory statistics for
bool CompilerOracle::should_collect_memstat() {
- return has_command(CompileCommand::MemStat);
+ return has_command(CompileCommand::MemStat) || has_command(CompileCommand::MemLimit);
}
bool CompilerOracle::should_print_final_memstat_report() {
@@ -634,6 +635,44 @@ void skip_comma(char* &line) {
}
}
static bool parseMemLimit(const char* line, intx& value, int& bytes_read, char* errorbuf, const int buf_size) {
// Format:
// "<memory size>['~' <suboption>]"
// <memory size> can have units, e.g. M
// <suboption> one of "crash" "stop", if omitted, "stop" is implied.
//
// Examples:
// -XX:CompileCommand='memlimit,*.*,20m'
// -XX:CompileCommand='memlimit,*.*,20m~stop'
// -XX:CompileCommand='memlimit,Option::toString,1m~crash'
//
// The resulting intx carries the size and whether we are to stop or crash:
// - neg. value means crash
// - pos. value (default) means stop
size_t s = 0;
char* end;
if (!parse_integer<size_t>(line, &end, &s)) {
jio_snprintf(errorbuf, buf_size, "MemLimit: invalid value");
}
bytes_read = (int)(end - line);
intx v = (intx)s;
if ((*end) != '\0') {
if (strncasecmp(end, "~crash", 6) == 0) {
v = -v;
bytes_read += 6;
} else if (strncasecmp(end, "~stop", 5) == 0) {
// ok, this is the default
bytes_read += 5;
} else {
jio_snprintf(errorbuf, buf_size, "MemLimit: invalid option");
return false;
}
}
value = v;
return true;
}
static bool parseEnumValueAsUintx(enum CompileCommand option, const char* line, uintx& value, int& bytes_read, char* errorbuf, const int buf_size) {
if (option == CompileCommand::MemStat) {
if (strncasecmp(line, "collect", 7) == 0) {
@@ -659,7 +698,13 @@ static void scan_value(enum OptionType type, char* line, int& total_bytes_read,
total_bytes_read += skipped;
if (type == OptionType::Intx) {
intx value;
if (sscanf(line, "" INTX_FORMAT "%n", &value, &bytes_read) == 1) {
// Special handling for memlimit
bool success = (option == CompileCommand::MemLimit) && parseMemLimit(line, value, bytes_read, errorbuf, buf_size);
if (!success) {
// Is it a raw number?
success = sscanf(line, "" INTX_FORMAT "%n", &value, &bytes_read) == 1;
}
if (success) {
total_bytes_read += bytes_read;
line += bytes_read;
register_command(matcher, option, value);

View File

@@ -57,6 +57,7 @@ class methodHandle;
option(Break, "break", Bool) \
option(BreakAtExecute, "BreakAtExecute", Bool) \
option(BreakAtCompile, "BreakAtCompile", Bool) \
option(MemLimit, "MemLimit", Intx) \
option(MemStat, "MemStat", Uintx) \
option(PrintAssembly, "PrintAssembly", Bool) \
option(PrintCompilation, "PrintCompilation", Bool) \

View File

@@ -39,6 +39,7 @@ CompilerThread::CompilerThread(CompileQueue* queue,
_queue = queue;
_counters = counters;
_buffer_blob = nullptr;
_can_call_java = false;
_compiler = nullptr;
_arena_stat = CompilationMemoryStatistic::enabled() ? new ArenaStatCounter : nullptr;
@@ -56,15 +57,17 @@ CompilerThread::~CompilerThread() {
delete _arena_stat;
}
void CompilerThread::set_compiler(AbstractCompiler* c) {
// Only jvmci compiler threads can call Java
_can_call_java = c != nullptr && c->is_jvmci();
_compiler = c;
}
void CompilerThread::thread_entry(JavaThread* thread, TRAPS) {
assert(thread->is_Compiler_thread(), "must be compiler thread");
CompileBroker::compiler_thread_loop();
}
- bool CompilerThread::can_call_java() const {
- return _compiler != nullptr && _compiler->is_jvmci();
- }
// Hide native compiler threads from external view.
bool CompilerThread::is_hidden_from_external_view() const {
return _compiler == nullptr || _compiler->is_hidden_from_external_view();

View File

@@ -31,18 +31,17 @@ class AbstractCompiler;
class ArenaStatCounter;
class BufferBlob;
class ciEnv;
- class CompileThread;
class CompilerThread;
class CompileLog;
class CompileTask;
class CompileQueue;
class CompilerCounters;
class IdealGraphPrinter;
class JVMCIEnv;
class JVMCIPrimitiveArray;
// A thread used for Compilation.
class CompilerThread : public JavaThread {
friend class VMStructs;
JVMCI_ONLY(friend class CompilerThreadCanCallJava;)
private:
CompilerCounters* _counters;
@@ -51,6 +50,7 @@ class CompilerThread : public JavaThread {
CompileTask* volatile _task; // print_threads_compiling can read this concurrently.
CompileQueue* _queue;
BufferBlob* _buffer_blob;
bool _can_call_java;
AbstractCompiler* _compiler;
TimeStamp _idle_time;
@@ -73,13 +73,13 @@ class CompilerThread : public JavaThread {
bool is_Compiler_thread() const { return true; }
- virtual bool can_call_java() const;
+ virtual bool can_call_java() const { return _can_call_java; }
// Returns true if this CompilerThread is hidden from JVMTI and FlightRecorder. C1 and C2 are
// always hidden but JVMCI compiler threads might be hidden.
virtual bool is_hidden_from_external_view() const;
- void set_compiler(AbstractCompiler* c) { _compiler = c; }
+ void set_compiler(AbstractCompiler* c);
AbstractCompiler* compiler() const { return _compiler; }
CompileQueue* queue() const { return _queue; }

View File

@@ -389,20 +389,7 @@ void PSCardTable::verify_all_young_refs_imprecise() {
bool PSCardTable::addr_is_marked_imprecise(void *addr) {
CardValue* p = byte_for(addr);
- CardValue val = *p;
- if (card_is_dirty(val))
- return true;
- if (card_is_newgen(val))
- return true;
- if (card_is_clean(val))
- return false;
- assert(false, "Found unhandled card mark type")
- return false;
+ return is_dirty(p);
}
bool PSCardTable::is_in_young(const void* p) const {

View File

@@ -63,11 +63,6 @@ class PSCardTable: public CardTable {
HeapWord* const start,
HeapWord* const end);
- enum ExtendedCardValue {
- youngergen_card = CT_MR_BS_last_reserved + 1,
- verify_card = CT_MR_BS_last_reserved + 5
- };
void scan_obj_with_limit(PSPromotionManager* pm,
oop obj,
HeapWord* start,
@@ -77,9 +72,6 @@ class PSCardTable: public CardTable {
PSCardTable(MemRegion whole_heap) : CardTable(whole_heap),
_preprocessing_active_workers(0) {}
- static CardValue youngergen_card_val() { return youngergen_card; }
- static CardValue verify_card_val() { return verify_card; }
// Scavenge support
void pre_scavenge(HeapWord* old_gen_bottom, uint active_workers);
// Scavenge contents of stripes with the given index.
@@ -92,29 +84,15 @@ class PSCardTable: public CardTable {
bool addr_is_marked_imprecise(void *addr);
- void set_card_newgen(void* addr) { CardValue* p = byte_for(addr); *p = verify_card; }
- // Testers for entries
- static bool card_is_dirty(int value) { return value == dirty_card; }
- static bool card_is_newgen(int value) { return value == youngergen_card; }
- static bool card_is_clean(int value) { return value == clean_card; }
- static bool card_is_verify(int value) { return value == verify_card; }
// Card marking
void inline_write_ref_field_gc(void* field) {
CardValue* byte = byte_for(field);
- *byte = youngergen_card;
+ *byte = dirty_card_val();
}
// ReduceInitialCardMarks support
bool is_in_young(const void* p) const override;
- #ifdef ASSERT
- bool is_valid_card_address(CardValue* addr) {
- return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
- }
- #endif // ASSERT
// Verification
void verify_all_young_refs_imprecise();
};

View File

@@ -26,7 +26,6 @@
#include "gc/serial/serialBlockOffsetTable.inline.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
@@ -34,14 +33,9 @@
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
- //////////////////////////////////////////////////////////////////////
- // BlockOffsetSharedArray
- //////////////////////////////////////////////////////////////////////
- BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
- size_t init_word_size):
- _reserved(reserved), _end(nullptr)
- {
+ SerialBlockOffsetSharedArray::SerialBlockOffsetSharedArray(MemRegion reserved,
+ size_t init_word_size):
+ _reserved(reserved) {
size_t size = compute_size(reserved.word_size());
ReservedSpace rs(size);
if (!rs.is_reserved()) {
@@ -53,27 +47,25 @@ BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
if (!_vs.initialize(rs, 0)) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
- _offset_array = (u_char*)_vs.low_boundary();
+ _offset_array = (uint8_t*)_vs.low_boundary();
resize(init_word_size);
log_trace(gc, bot)("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
log_trace(gc, bot)("SerialBlockOffsetSharedArray::SerialBlockOffsetSharedArray: ");
log_trace(gc, bot)(" rs.base(): " PTR_FORMAT " rs.size(): " SIZE_FORMAT_X_0 " rs end(): " PTR_FORMAT,
p2i(rs.base()), rs.size(), p2i(rs.base() + rs.size()));
log_trace(gc, bot)(" _vs.low_boundary(): " PTR_FORMAT " _vs.high_boundary(): " PTR_FORMAT,
p2i(_vs.low_boundary()), p2i(_vs.high_boundary()));
}
- void BlockOffsetSharedArray::resize(size_t new_word_size) {
+ void SerialBlockOffsetSharedArray::resize(size_t new_word_size) {
assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
size_t new_size = compute_size(new_word_size);
size_t old_size = _vs.committed_size();
size_t delta;
char* high = _vs.high();
- _end = _reserved.start() + new_word_size;
if (new_size > old_size) {
delta = ReservedSpace::page_align_size_up(new_size - old_size);
assert(delta > 0, "just checking");
if (!_vs.expand_by(delta)) {
// Do better than this for Merlin
vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
}
assert(_vs.high() == high + delta, "invalid expansion");
@@ -85,384 +77,109 @@ void BlockOffsetSharedArray::resize(size_t new_word_size) {
}
}
- bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
- assert(p >= _reserved.start(), "just checking");
- size_t delta = pointer_delta(p, _reserved.start());
- return (delta & right_n_bits((int)BOTConstants::log_card_size_in_words())) == (size_t)NoBits;
- }
// Write the backskip value for each logarithmic region (array slots containing the same entry value).
//
// offset
// card 2nd 3rd
// | +- 1st | |
// v v v v
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-
// |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-
// 11 19 75
// 12
//
// offset card is the card that points to the start of an object
// x - offset value of offset card
// 1st - start of first logarithmic region
// 0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
// 2nd - start of second logarithmic region
// 1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
// 3rd - start of third logarithmic region
// 2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
//
// integer below the block offset entry is an example of
// the index of the entry
//
// Given an address,
// Find the index for the address
// Find the block offset table entry
// Convert the entry to a back slide
// (e.g., with today's, offset = 0x81 =>
// back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
// Move back N (e.g., 8) entries and repeat with the
// value of the new entry
//
void SerialBlockOffsetTable::update_for_block_work(HeapWord* blk_start,
HeapWord* blk_end) {
HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start);
size_t const offset_card = _array->index_for(cur_card_boundary);
// The first card holds the actual offset.
_array->set_offset_array(offset_card, cur_card_boundary, blk_start);
//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////
// Check if this block spans over other cards.
size_t end_card = _array->index_for(blk_end - 1);
assert(offset_card <= end_card, "inv");
BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
MemRegion mr, bool init_to_zero_) :
BlockOffsetTable(mr.start(), mr.end()),
_array(array)
{
assert(_bottom <= _end, "arguments out of order");
set_init_to_zero(init_to_zero_);
if (!init_to_zero_) {
// initialize cards to point back to mr.start()
set_remainder_to_point_to_start(mr.start() + BOTConstants::card_size_in_words(), mr.end());
_array->set_offset_array(0, 0); // set first card to 0
}
}
if (offset_card != end_card) {
// Handling remaining cards.
size_t start_card_for_region = offset_card + 1;
for (uint i = 0; i < BOTConstants::N_powers; i++) {
// -1 so that the reach ends in this region and not at the start
// of the next.
size_t reach = offset_card + BOTConstants::power_to_cards_back(i + 1) - 1;
uint8_t value = checked_cast<uint8_t>(BOTConstants::card_size_in_words() + i);
// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {
check_reducing_assertion(reducing);
if (start >= end) {
// The start address is equal to the end address (or to
// the right of the end address) so there are not cards
// that need to be updated..
return;
}
// Write the backskip value for each region.
//
// offset
// card 2nd 3rd
// | +- 1st | |
// v v v v
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-
// |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-
// 11 19 75
// 12
//
// offset card is the card that points to the start of an object
// x - offset value of offset card
// 1st - start of first logarithmic region
// 0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
// 2nd - start of second logarithmic region
// 1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
// 3rd - start of third logarithmic region
// 2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
//
// integer below the block offset entry is an example of
// the index of the entry
//
// Given an address,
// Find the index for the address
// Find the block offset table entry
// Convert the entry to a back slide
// (e.g., with today's, offset = 0x81 =>
// back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
// Move back N (e.g., 8) entries and repeat with the
// value of the new entry
//
size_t start_card = _array->index_for(start);
size_t end_card = _array->index_for(end-1);
assert(start ==_array->address_for_index(start_card), "Precondition");
assert(end ==_array->address_for_index(end_card)+BOTConstants::card_size_in_words(), "Precondition");
set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}
// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {
check_reducing_assertion(reducing);
if (start_card > end_card) {
return;
}
assert(start_card > _array->index_for(_bottom), "Cannot be first card");
assert(_array->offset_array(start_card-1) <= BOTConstants::card_size_in_words(),
"Offset card has an unexpected value");
size_t start_card_for_region = start_card;
u_char offset = max_jubyte;
for (uint i = 0; i < BOTConstants::N_powers; i++) {
// -1 so that the card with the actual offset is counted. Another -1
// so that the reach ends in this region and not at the start
// of the next.
size_t reach = start_card - 1 + (BOTConstants::power_to_cards_back(i+1) - 1);
offset = BOTConstants::card_size_in_words() + i;
if (reach >= end_card) {
_array->set_offset_array(start_card_for_region, end_card, offset, reducing);
_array->set_offset_array(start_card_for_region, MIN2(reach, end_card), value);
start_card_for_region = reach + 1;
if (reach >= end_card) {
break;
}
}
assert(start_card_for_region > end_card, "Sanity check");
}
debug_only(verify_for_block(blk_start, blk_end);)
}
HeapWord* SerialBlockOffsetTable::block_start_reaching_into_card(const void* addr) const {
size_t index = _array->index_for(addr);
uint8_t offset;
while (true) {
offset = _array->offset_array(index);
if (offset < BOTConstants::card_size_in_words()) {
break;
}
_array->set_offset_array(start_card_for_region, reach, offset, reducing);
start_card_for_region = reach + 1;
}
assert(start_card_for_region > end_card, "Sanity check");
DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
if (end_card < start_card) {
return;
}
guarantee(_array->offset_array(start_card) == BOTConstants::card_size_in_words(), "Wrong value in second card");
u_char last_entry = BOTConstants::card_size_in_words();
for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
u_char entry = _array->offset_array(c);
guarantee(entry >= last_entry, "Monotonicity");
if (c - start_card > BOTConstants::power_to_cards_back(1)) {
guarantee(entry > BOTConstants::card_size_in_words(), "Should be in logarithmic region");
}
size_t backskip = BOTConstants::entry_to_cards_back(entry);
size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) {
guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
} else {
guarantee(landing_card == (start_card - 1), "Tautology");
// Note that N_words is the maximum offset value
guarantee(_array->offset_array(landing_card) <= BOTConstants::card_size_in_words(), "Offset value");
}
last_entry = entry; // remember for monotonicity test
}
}
void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
assert(blk_start != nullptr && blk_end > blk_start,
"phantom block");
single_block(blk_start, blk_end);
}
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
HeapWord* blk_end,
bool reducing) {
assert(_sp->is_in_reserved(blk_start),
"reference must be into the space");
assert(_sp->is_in_reserved(blk_end-1),
"limit must be within the space");
// This is optimized to make the test fast, assuming we only rarely
// cross boundaries.
uintptr_t end_ui = (uintptr_t)(blk_end - 1);
uintptr_t start_ui = (uintptr_t)blk_start;
// Calculate the last card boundary preceding end of blk
intptr_t boundary_before_end = (intptr_t)end_ui;
clear_bits(boundary_before_end, right_n_bits((int)BOTConstants::log_card_size()));
if (start_ui <= (uintptr_t)boundary_before_end) {
// blk starts at or crosses a boundary
// Calculate index of card on which blk begins
size_t start_index = _array->index_for(blk_start);
// Index of card on which blk ends
size_t end_index = _array->index_for(blk_end - 1);
// Start address of card on which blk begins
HeapWord* boundary = _array->address_for_index(start_index);
assert(boundary <= blk_start, "blk should start at or after boundary");
if (blk_start != boundary) {
// blk starts strictly after boundary
// adjust card boundary and start_index forward to next card
boundary += BOTConstants::card_size_in_words();
start_index++;
}
assert(start_index <= end_index, "monotonicity of index_for()");
assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
_array->set_offset_array(start_index, boundary, blk_start, reducing);
// We have finished marking the "offset card". We need to now
// mark the subsequent cards that this blk spans.
if (start_index < end_index) {
HeapWord* rem_st = _array->address_for_index(start_index) + BOTConstants::card_size_in_words();
HeapWord* rem_end = _array->address_for_index(end_index) + BOTConstants::card_size_in_words();
set_remainder_to_point_to_start(rem_st, rem_end, reducing);
}
}
}
// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
HeapWord* blk_end) {
do_block_internal(blk_start, blk_end);
}
void BlockOffsetArray::verify() const {
// For each entry in the block offset table, verify that
// the entry correctly finds the start of an object at the
// first address covered by the block or to the left of that
// first address.
size_t next_index = 1;
size_t last_index = last_active_index();
// Use for debugging. Initialize to null to distinguish the
// first iteration through the while loop.
HeapWord* last_p = nullptr;
HeapWord* last_start = nullptr;
oop last_o = nullptr;
while (next_index <= last_index) {
// Use an address past the start of the address for
// the entry.
HeapWord* p = _array->address_for_index(next_index) + 1;
if (p >= _end) {
// That's all of the allocated block table.
return;
}
// block_start() asserts that start <= p.
HeapWord* start = block_start(p);
// First check if the start is an allocated block and only
// then if it is a valid object.
oop o = cast_to_oop(start);
assert(!Universe::is_fully_initialized() ||
_sp->is_free_block(start) ||
oopDesc::is_oop_or_null(o), "Bad object was found");
next_index++;
last_p = p;
last_start = start;
last_o = o;
}
}
//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////
HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const {
assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
// Otherwise, find the block start using the table.
assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array");
size_t index = _array->index_for(addr);
// We must make sure that the offset table entry we use is valid. If
// "addr" is past the end, start at the last known one and go forward.
index = MIN2(index, _next_offset_index-1);
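// For example, if "addr" lies in a card past the last one recorded,
// index_for(addr) would point at uninitialized entries; clamping to
// _next_offset_index - 1 starts the search at the last valid entry,
// and the forward walk at the end of this method finds the block.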
HeapWord* q = _array->address_for_index(index);
uint offset = _array->offset_array(index); // Extend u_char to uint.
while (offset > BOTConstants::card_size_in_words()) {
// The amount by which the entry exceeds the card size encodes
// (as a power of the BOT base) how many cards to skip back.
size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
q -= (BOTConstants::card_size_in_words() * n_cards_back);
assert(q >= _sp->bottom(), "Went below bottom!");
index -= n_cards_back;
offset = _array->offset_array(index);
}
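// Entries greater than card_size_in_words() are not direct word offsets:
// entry_to_cards_back() decodes them into a number of cards to skip back,
// growing as a power of the BOT base, so long blocks are walked in
// logarithmically many steps rather than card by card.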
while (offset == BOTConstants::card_size_in_words()) {
assert(q >= _sp->bottom(), "Went below bottom!");
q -= BOTConstants::card_size_in_words();
index--;
offset = _array->offset_array(index);
}
assert(offset < BOTConstants::card_size_in_words(), "offset too large");
q -= offset;
HeapWord* n = q;
while (n <= addr) {
debug_only(HeapWord* last = q); // for debugging
q = n;
n += _sp->block_size(n);
}
assert(q <= addr, "wrong order for current and arg");
assert(addr <= n, "wrong order for arg and next");
return q;
}
//
// _next_offset_threshold
// | _next_offset_index
// v v
// +-------+-------+-------+-------+-------+
// | i-1 | i | i+1 | i+2 | i+3 |
// +-------+-------+-------+-------+-------+
// ( ^ ]
// block-start
//
void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
HeapWord* blk_end) {
assert(blk_start != nullptr && blk_end > blk_start,
"phantom block");
assert(blk_end > _next_offset_threshold,
"should be past threshold");
assert(blk_start <= _next_offset_threshold,
"blk_start should be at or before threshold");
assert(pointer_delta(_next_offset_threshold, blk_start) <= BOTConstants::card_size_in_words(),
"offset should be <= BlockOffsetSharedArray::N");
assert(_sp->is_in_reserved(blk_start),
"reference must be into the space");
assert(_sp->is_in_reserved(blk_end-1),
"limit must be within the space");
assert(_next_offset_threshold ==
_array->_reserved.start() + _next_offset_index*BOTConstants::card_size_in_words(),
"index must agree with threshold");
debug_only(size_t orig_next_offset_index = _next_offset_index;)
// Mark the card that holds the offset into the block. Note
// that _next_offset_index and _next_offset_threshold are not
// updated until the end of this method.
_array->set_offset_array(_next_offset_index,
_next_offset_threshold,
blk_start);
// We need to now mark the subsequent cards that this blk spans.
// Index of card on which blk ends.
size_t end_index = _array->index_for(blk_end - 1);
// Are there more cards left to be updated?
if (_next_offset_index + 1 <= end_index) {
HeapWord* rem_st = _array->address_for_index(_next_offset_index + 1);
// Calculate rem_end this way because end_index
// may be the last valid index in the covered region.
HeapWord* rem_end = _array->address_for_index(end_index) + BOTConstants::card_size_in_words();
set_remainder_to_point_to_start(rem_st, rem_end);
}
// _next_offset_index and _next_offset_threshold updated here.
_next_offset_index = end_index + 1;
// Calculate _next_offset_threshold this way because end_index
// may be the last valid index in the covered region.
_next_offset_threshold = _array->address_for_index(end_index) + BOTConstants::card_size_in_words();
assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");
#ifdef ASSERT
// The offset can be 0 if the block starts on a boundary. That
// is checked by an assertion above.
size_t start_index = _array->index_for(blk_start);
HeapWord* boundary = _array->address_for_index(start_index);
assert((_array->offset_array(orig_next_offset_index) == 0 &&
blk_start == boundary) ||
(_array->offset_array(orig_next_offset_index) > 0 &&
_array->offset_array(orig_next_offset_index) <= BOTConstants::card_size_in_words()),
"offset array should have been set");
for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <= (u_char) (BOTConstants::card_size_in_words()+BOTConstants::N_powers-1),
"offset array should have been set");
}
#endif
}
HeapWord* q = _array->address_for_index(index);
return q - offset;
}
void SerialBlockOffsetTable::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const {
assert(is_crossing_card_boundary(blk_start, blk_end), "precondition");
const size_t start_card = _array->index_for(align_up_by_card_size(blk_start));
const size_t end_card = _array->index_for(blk_end - 1);
// Check cards in [start_card, end_card]
assert(_array->offset_array(start_card) < BOTConstants::card_size_in_words(), "offset card");
for (size_t i = start_card + 1; i <= end_card; ++i) {
const uint8_t prev = _array->offset_array(i-1);
const uint8_t value = _array->offset_array(i);
if (prev != value) {
assert(value >= prev, "monotonic");
size_t n_cards_back = BOTConstants::entry_to_cards_back(value);
assert(start_card == (i - n_cards_back), "inv");
}
}
}
void BlockOffsetArrayContigSpace::initialize_threshold() {
_next_offset_index = _array->index_for(_bottom);
_next_offset_index++;
_next_offset_threshold =
_array->address_for_index(_next_offset_index);
}
void BlockOffsetArrayContigSpace::zero_bottom_entry() {
size_t bottom_index = _array->index_for(_bottom);
_array->set_offset_array(bottom_index, 0);
}
size_t BlockOffsetArrayContigSpace::last_active_index() const {
return _next_offset_index == 0 ? 0 : _next_offset_index - 1;
}
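The refactoring above is easier to follow with the table's invariant in mind: every card entry stores how far back the enclosing block starts. Below is a minimal standalone sketch of that idea, using an assumed toy card size of 8 words and plain byte offsets; the real table encodes large back-distances logarithmically via entry_to_cards_back(), and all names in the sketch are hypothetical:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyBOT {
  static const size_t card_words = 8;   // words per card (toy value)
  std::vector<uint8_t> offsets;         // one back-offset entry per card

  explicit ToyBOT(size_t cards) : offsets(cards, 0) {}

  // Record a block occupying words [blk_start, blk_end).
  void update_for_block(size_t blk_start, size_t blk_end) {
    for (size_t card = blk_start / card_words + 1;
         card <= (blk_end - 1) / card_words; card++) {
      // Distance from the card's first word back to the block start,
      // capped at one byte here (the real BOT switches to logarithmic
      // encoding instead of capping).
      size_t back = card * card_words - blk_start;
      offsets[card] = (uint8_t)(back < 255 ? back : 255);
    }
  }

  // Start of the block covering word "addr" (valid for words past the
  // block's first card, as in the real table).
  size_t block_start(size_t addr) const {
    size_t card = addr / card_words;
    return card * card_words - offsets[card];
  }
};

int main() {
  ToyBOT bot(16);
  bot.update_for_block(3, 40);          // block spanning cards 0..4
  assert(bot.block_start(25) == 3);     // card 3 points back to word 3
  assert(bot.block_start(38) == 3);     // card 4 likewise
  return 0;
}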

@@ -36,168 +36,34 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
// - BlockOffsetArray (abstract)
// - BlockOffsetArrayContigSpace
//
class ContiguousSpace;
//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable {
class SerialBlockOffsetSharedArray: public CHeapObj<mtGC> {
friend class VMStructs;
protected:
// These members describe the region covered by the table.
friend class SerialBlockOffsetTable;
// The space this table is covering.
HeapWord* _bottom; // == reserved.start
HeapWord* _end; // End of currently allocated region.
public:
// Initialize the table to cover the given space.
// The contents of the initial table are undefined.
BlockOffsetTable(HeapWord* bottom, HeapWord* end):
_bottom(bottom), _end(end) {
assert(_bottom <= _end, "arguments out of order");
assert(BOTConstants::card_size() == CardTable::card_size(), "sanity");
}
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
virtual void resize(size_t new_word_size) = 0;
virtual void set_bottom(HeapWord* new_bottom) {
assert(new_bottom <= _end, "new_bottom > _end");
_bottom = new_bottom;
resize(pointer_delta(_end, _bottom));
}
// Requires "addr" to be contained by a block, and returns the address of
// the start of that block.
virtual HeapWord* block_start_unsafe(const void* addr) const = 0;
// Returns the address of the start of the block containing "addr", or
// else "null" if it is covered by no block.
HeapWord* block_start(const void* addr) const;
};
//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN". An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)
// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
friend class BlockOffsetArray;
friend class BlockOffsetArrayNonContigSpace;
friend class BlockOffsetArrayContigSpace;
friend class VMStructs;
private:
bool _init_to_zero;
// The reserved region covered by the shared array.
// The reserved heap (i.e. old-gen) covered by the shared array.
MemRegion _reserved;
// End of the current committed region.
HeapWord* _end;
// Array for keeping offsets for retrieving object start fast given an
// address.
VirtualSpace _vs;
u_char* _offset_array; // byte array keeping backwards offsets
uint8_t* _offset_array; // byte array keeping backwards offsets
void fill_range(size_t start, size_t num_cards, u_char offset) {
void fill_range(size_t start, size_t num_cards, uint8_t offset) {
void* start_ptr = &_offset_array[start];
// If collector is concurrent, special handling may be needed.
G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
memset(start_ptr, offset, num_cards);
}
protected:
// Bounds checking accessors:
// For performance these have to devolve to array accesses in product builds.
u_char offset_array(size_t index) const {
uint8_t offset_array(size_t index) const {
assert(index < _vs.committed_size(), "index out of range");
return _offset_array[index];
}
// An assertion-checking helper method for the set_offset_array() methods below.
void check_reducing_assertion(bool reducing);
void set_offset_array(size_t index, u_char offset, bool reducing = false) {
check_reducing_assertion(reducing);
assert(index < _vs.committed_size(), "index out of range");
assert(!reducing || _offset_array[index] >= offset, "Not reducing");
_offset_array[index] = offset;
}
void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
check_reducing_assertion(reducing);
assert(index < _vs.committed_size(), "index out of range");
assert(high >= low, "addresses out of order");
assert(pointer_delta(high, low) <= BOTConstants::card_size_in_words(), "offset too large");
assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
"Not reducing");
_offset_array[index] = (u_char)pointer_delta(high, low);
}
void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
check_reducing_assertion(reducing);
assert(index_for(right - 1) < _vs.committed_size(),
"right address out of range");
assert(left < right, "Heap addresses out of order");
size_t num_cards = pointer_delta(right, left) >> BOTConstants::log_card_size_in_words();
fill_range(index_for(left), num_cards, offset);
}
void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
check_reducing_assertion(reducing);
assert(right < _vs.committed_size(), "right address out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
fill_range(left, num_cards, offset);
}
void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
assert(index < _vs.committed_size(), "index out of range");
assert(high >= low, "addresses out of order");
assert(pointer_delta(high, low) <= BOTConstants::card_size_in_words(), "offset too large");
assert(_offset_array[index] == pointer_delta(high, low),
"Wrong offset");
}
bool is_card_boundary(HeapWord* p) const;
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
// We always add an extra slot because if an object
// ends on a card boundary we put a 0 in the next
// offset array slot, so we want that slot always
// to be reserved.
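// Worked example (assuming 64 words per 512-byte card on 64-bit):
// covering 6400 words needs 6400/64 = 100 cards, so the variant keeping
// the sentinel slot described above reserves 101 entries, while the
// variant without it reserves exactly 100.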
static size_t compute_size(size_t mem_region_words) {
assert(mem_region_words % BOTConstants::card_size_in_words() == 0, "precondition");
size_t compute_size(size_t mem_region_words) {
size_t number_of_slots = (mem_region_words / BOTConstants::card_size_in_words()) + 1;
size_t number_of_slots = mem_region_words / BOTConstants::card_size_in_words();
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
@@ -207,198 +73,79 @@ public:
// (see "resize" below) up to the size of "_reserved" (which must be at
// least "init_word_size".) The contents of the initial table are
// undefined; it is the responsibility of the constituent
// BlockOffsetTable(s) to initialize cards.
BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
// SerialBlockOffsetTable(s) to initialize cards.
SerialBlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
// Notes a change in the committed size of the region covered by the
// table. The "new_word_size" may not be larger than the size of the
// reserved region this table covers.
void resize(size_t new_word_size);
void set_bottom(HeapWord* new_bottom);
// Whether entries should be initialized to zero. Used currently only for
// error checking.
void set_init_to_zero(bool val) { _init_to_zero = val; }
bool init_to_zero() { return _init_to_zero; }
// Updates all the BlockOffsetArray's sharing this shared array to
// reflect the current "top"'s of their spaces.
void update_offset_arrays(); // Not yet implemented!
// Return the appropriate index into "_offset_array" for "p".
size_t index_for(const void* p) const;
// Return the address indicating the start of the region corresponding to
// "index" in "_offset_array".
HeapWord* address_for_index(size_t index) const;
void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
assert(index < _vs.committed_size(), "index out of range");
assert(high >= low, "addresses out of order");
assert(pointer_delta(high, low) < BOTConstants::card_size_in_words(), "offset too large");
_offset_array[index] = checked_cast<uint8_t>(pointer_delta(high, low));
}
void set_offset_array(size_t left, size_t right, uint8_t offset) {
assert(right < _vs.committed_size(), "right address out of range");
assert(left <= right, "precondition");
size_t num_cards = right - left + 1;
fill_range(left, num_cards, offset);
}
};
class Space;
//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
// SerialBlockOffsetTable divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN". An array with an entry for each such subregion indicates
// how far back one must go to find the start of the chunk that includes the
// first word of the subregion.
class SerialBlockOffsetTable {
friend class VMStructs;
protected:
// The shared array, which is shared with other BlockOffsetArray's
// corresponding to different spaces within a generation or span of
// memory.
BlockOffsetSharedArray* _array;
// The space that owns this subregion.
Space* _sp;
// The array that contains the offset values. It reacts to heap resizing.
SerialBlockOffsetSharedArray* _array;
// If true, array entries are initialized to 0; otherwise, they are
// initialized to point backwards to the beginning of the covered region.
bool _init_to_zero;
void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);
// An assertion-checking helper method for the set_remainder*() methods below.
void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }
// Sets the entries
// corresponding to the cards starting at "start" and ending at "end"
// to point back to the card before "start": the interval [start, end)
// is right-open. The last parameter, reducing, indicates whether the
// updates to individual entries always reduce the entry from a higher
// to a lower value. (For example, this would hold true during a period
// in which only block splits were updating the BOT.)
void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
// Same as above, except that the args here are a card _index_ interval
// that is closed: [start_index, end_index]
void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);
// A helper function for BOT adjustment/verification work
void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
public:
// The space may not have its bottom and top set yet, which is why the
// region is passed as a parameter. If "init_to_zero" is true, the
// elements of the array are initialized to zero. Otherwise, they are
// initialized to point backwards to the beginning.
BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
bool init_to_zero_);
// Note: this ought to be part of the constructor, but that would require
// "this" to be passed as a parameter to a member constructor for
// the containing concrete subtype of Space.
// This would be legal C++, but MS VC++ doesn't allow it.
void set_space(Space* sp) { _sp = sp; }
// Resets the covered region to the given "mr".
void set_region(MemRegion mr) {
_bottom = mr.start();
_end = mr.end();
static HeapWord* align_up_by_card_size(HeapWord* const addr) {
return align_up(addr, BOTConstants::card_size());
}
// Note that the committed size of the covered space may have changed,
// so the table size might also wish to change.
virtual void resize(size_t new_word_size) {
HeapWord* new_end = _bottom + new_word_size;
if (_end < new_end && !init_to_zero()) {
// verify that the old and new boundaries are also card boundaries
assert(_array->is_card_boundary(_end),
"_end not a card boundary");
assert(_array->is_card_boundary(new_end),
"new _end would not be a card boundary");
// set all the newly added cards
_array->set_offset_array(_end, new_end, BOTConstants::card_size_in_words());
}
_end = new_end; // update _end
void verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const;
public:
// Initialize the table to cover the given space.
// The contents of the initial table are undefined.
SerialBlockOffsetTable(SerialBlockOffsetSharedArray* array) : _array(array) {
assert(BOTConstants::card_size() == CardTable::card_size(), "sanity");
}
// Adjust the BOT to show that it has a single block in the
// range [blk_start, blk_start + size). All necessary BOT
// cards are adjusted, but _unallocated_block isn't.
void single_block(HeapWord* blk_start, HeapWord* blk_end);
void single_block(HeapWord* blk, size_t size) {
single_block(blk, blk + size);
static bool is_crossing_card_boundary(HeapWord* const obj_start,
HeapWord* const obj_end) {
HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
// Strictly greater-than, since we check if this block *crosses* card boundary.
return obj_end > cur_card_boundary;
}
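// Example, assuming 512-byte cards: a block [0x1008, 0x1180) stays
// within the card ending at 0x1200 and needs no BOT update, while
// [0x1008, 0x1240) crosses 0x1200 and does. A block starting exactly
// on a card boundary always "crosses" it, so its entry gets set.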
// When the alloc_block() call returns, the block offset table should
// have enough information such that any subsequent block_start() call
// with an argument equal to an address that is within the range
// [blk_start, blk_end) would return the value blk_start, provided
// there have been no calls in between that reset this information
// (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
// for an appropriate range covering the said interval).
// These methods expect to be called with [blk_start, blk_end)
// representing a block of memory in the heap.
virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
void alloc_block(HeapWord* blk, size_t size) {
alloc_block(blk, blk + size);
}
// Returns the address of the start of the block reaching into the card containing
// "addr".
HeapWord* block_start_reaching_into_card(const void* addr) const;
// If true, initialize array slots with no allocated blocks to zero.
// Otherwise, make them point back to the front.
bool init_to_zero() { return _init_to_zero; }
// Corresponding setter
void set_init_to_zero(bool val) {
_init_to_zero = val;
assert(_array != nullptr, "_array should be non-null");
_array->set_init_to_zero(val);
}
// Debugging
// Return the index of the last entry in the "active" region.
virtual size_t last_active_index() const = 0;
// Verify the block offset table
void verify() const;
void check_all_cards(size_t left_card, size_t right_card) const;
};
////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
friend class VMStructs;
private:
// allocation boundary at which offset array must be updated
HeapWord* _next_offset_threshold;
size_t _next_offset_index; // index corresponding to that boundary
// Work function when allocation start crosses threshold.
void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);
public:
BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
BlockOffsetArray(array, mr, true) {
_next_offset_threshold = nullptr;
_next_offset_index = 0;
}
void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }
// Initialize the threshold for an empty heap.
void initialize_threshold();
// Zero out the entry for _bottom (offset will be zero)
void zero_bottom_entry();
// Return the next threshold, the point at which the table should be
// updated.
HeapWord* threshold() const { return _next_offset_threshold; }
// In general, these methods expect to be called with
// [blk_start, blk_end) representing a block of memory in the heap.
// In this implementation, however, we are OK even if blk_start and/or
// blk_end are null because null is represented as 0, and thus
// never exceeds the "_next_offset_threshold".
void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
if (blk_end > _next_offset_threshold) {
alloc_block_work(blk_start, blk_end);
void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
if (is_crossing_card_boundary(blk_start, blk_end)) {
update_for_block_work(blk_start, blk_end);
}
}
void alloc_block(HeapWord* blk, size_t size) {
alloc_block(blk, blk + size);
}
HeapWord* block_start_unsafe(const void* addr) const;
// Debugging support
virtual size_t last_active_index() const;
};
#endif // SHARE_GC_SERIAL_SERIALBLOCKOFFSETTABLE_HPP

@@ -27,24 +27,7 @@
#include "gc/serial/serialBlockOffsetTable.hpp"
#include "gc/shared/space.hpp"
#include "runtime/safepoint.hpp"
//////////////////////////////////////////////////////////////////////////
// BlockOffsetTable inlines
//////////////////////////////////////////////////////////////////////////
inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
if (addr >= _bottom && addr < _end) {
return block_start_unsafe(addr);
} else {
return nullptr;
}
}
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray inlines
//////////////////////////////////////////////////////////////////////////
inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
inline size_t SerialBlockOffsetSharedArray::index_for(const void* p) const {
char* pc = (char*)p;
assert(pc >= (char*)_reserved.start() &&
pc < (char*)_reserved.end(),
@@ -55,7 +38,7 @@ inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
return result;
}
inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
inline HeapWord* SerialBlockOffsetSharedArray::address_for_index(size_t index) const {
assert(index < _vs.committed_size(), "bad index");
HeapWord* result = _reserved.start() + (index << BOTConstants::log_card_size_in_words());
assert(result >= _reserved.start() && result < _reserved.end(),
@@ -63,10 +46,4 @@ inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
return result;
}
inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
Thread::current()->is_VM_thread() ||
Thread::current()->is_ConcurrentGC_thread(), "Crack");
}
#endif // SHARE_GC_SERIAL_SERIALBLOCKOFFSETTABLE_INLINE_HPP

@@ -295,8 +295,8 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
assert((uintptr_t(start) & 3) == 0, "bad alignment");
assert((reserved_byte_size & 3) == 0, "bad alignment");
MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
_bts = new BlockOffsetSharedArray(reserved_mr,
heap_word_size(initial_byte_size));
_bts = new SerialBlockOffsetSharedArray(reserved_mr,
heap_word_size(initial_byte_size));
MemRegion committed_mr(start, heap_word_size(initial_byte_size));
_rs->resize_covered_region(committed_mr);
@@ -474,11 +474,10 @@ void TenuredGeneration::object_iterate(ObjectClosure* blk) {
void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
// Create the BOT for the archive space.
TenuredSpace* space = _the_space;
space->initialize_threshold();
HeapWord* start = archive_space.start();
while (start < archive_space.end()) {
size_t word_size = cast_to_oop(start)->size();
space->alloc_block(start, start + word_size);
space->update_for_block(start, start + word_size);
start += word_size;
}
}

@@ -31,7 +31,7 @@
#include "gc/shared/generationCounters.hpp"
#include "utilities/macros.hpp"
class BlockOffsetSharedArray;
class SerialBlockOffsetSharedArray;
class CardTableRS;
class ContiguousSpace;
@@ -50,7 +50,7 @@ class TenuredGeneration: public Generation {
// This is shared with other generations.
CardTableRS* _rs;
// This is local to this generation.
BlockOffsetSharedArray* _bts;
SerialBlockOffsetSharedArray* _bts;
// Current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;

@@ -29,38 +29,31 @@
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#define VM_STRUCTS_SERIALGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field) \
nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
nonstatic_field(TenuredGeneration, _bts, BlockOffsetSharedArray*) \
nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
nonstatic_field(TenuredGeneration, _the_space, TenuredSpace*) \
\
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
\
nonstatic_field(BlockOffsetTable, _bottom, HeapWord*) \
nonstatic_field(BlockOffsetTable, _end, HeapWord*) \
\
nonstatic_field(BlockOffsetSharedArray, _reserved, MemRegion) \
nonstatic_field(BlockOffsetSharedArray, _end, HeapWord*) \
nonstatic_field(BlockOffsetSharedArray, _vs, VirtualSpace) \
nonstatic_field(BlockOffsetSharedArray, _offset_array, u_char*) \
\
nonstatic_field(BlockOffsetArray, _array, BlockOffsetSharedArray*) \
nonstatic_field(BlockOffsetArray, _sp, Space*) \
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold, HeapWord*) \
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index, size_t) \
\
nonstatic_field(TenuredSpace, _offsets, BlockOffsetArray)
#define VM_STRUCTS_SERIALGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field) \
nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
nonstatic_field(TenuredGeneration, _bts, SerialBlockOffsetSharedArray*) \
nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
nonstatic_field(TenuredGeneration, _the_space, TenuredSpace*) \
\
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
\
nonstatic_field(SerialBlockOffsetTable, _array, SerialBlockOffsetSharedArray*) \
\
nonstatic_field(SerialBlockOffsetSharedArray, _reserved, MemRegion) \
nonstatic_field(SerialBlockOffsetSharedArray, _vs, VirtualSpace) \
nonstatic_field(SerialBlockOffsetSharedArray, _offset_array, u_char*) \
\
nonstatic_field(TenuredSpace, _offsets, SerialBlockOffsetTable)
#define VM_TYPES_SERIALGC(declare_type, \
declare_toplevel_type, \
@@ -73,11 +66,8 @@
declare_type(CardTableRS, CardTable) \
\
declare_toplevel_type(TenuredGeneration*) \
declare_toplevel_type(BlockOffsetSharedArray) \
declare_toplevel_type(BlockOffsetTable) \
declare_type(BlockOffsetArray, BlockOffsetTable) \
declare_type(BlockOffsetArrayContigSpace, BlockOffsetArray) \
declare_toplevel_type(BlockOffsetSharedArray*)
declare_toplevel_type(SerialBlockOffsetSharedArray) \
declare_toplevel_type(SerialBlockOffsetTable)
#define VM_INT_CONSTANTS_SERIALGC(declare_constant, \
declare_constant_with_value)

@@ -87,25 +87,6 @@ bool ContiguousSpace::is_free_block(const HeapWord* p) const {
return p >= _top;
}
#if INCLUDE_SERIALGC
void TenuredSpace::clear(bool mangle_space) {
ContiguousSpace::clear(mangle_space);
_offsets.initialize_threshold();
}
void TenuredSpace::set_bottom(HeapWord* new_bottom) {
Space::set_bottom(new_bottom);
_offsets.set_bottom(new_bottom);
}
void TenuredSpace::set_end(HeapWord* new_end) {
// Space should not advertise an increase in size
// until after the underlying offset table has been enlarged.
_offsets.resize(pointer_delta(new_end, bottom()));
Space::set_end(new_end);
}
#endif // INCLUDE_SERIALGC
#ifndef PRODUCT
void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
@@ -152,7 +133,6 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
}
compact_top = cp->space->bottom();
cp->space->set_compaction_top(compact_top);
cp->space->initialize_threshold();
compaction_max_size = pointer_delta(cp->space->end(), compact_top);
}
@@ -172,7 +152,7 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
// We need to update the offset table so that the beginnings of objects can be
// found during scavenge. Note that we are updating the offset table based on
// where the object will be once the compaction phase finishes.
cp->space->alloc_block(compact_top - size, compact_top);
cp->space->update_for_block(compact_top - size, compact_top);
return compact_top;
}
@@ -190,7 +170,6 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
assert(cp->gen != nullptr, "need a generation");
assert(cp->gen->first_compaction_space() == this, "just checking");
cp->space = cp->gen->first_compaction_space();
cp->space->initialize_threshold();
cp->space->set_compaction_top(cp->space->bottom());
}
@@ -384,9 +363,8 @@ void ContiguousSpace::print_on(outputStream* st) const {
#if INCLUDE_SERIALGC
void TenuredSpace::print_on(outputStream* st) const {
print_short_on(st);
st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", "
PTR_FORMAT ", " PTR_FORMAT ")",
p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
p2i(bottom()), p2i(top()), p2i(end()));
}
#endif
@@ -510,20 +488,30 @@ HeapWord* ContiguousSpace::par_allocate(size_t size) {
}
#if INCLUDE_SERIALGC
void TenuredSpace::initialize_threshold() {
_offsets.initialize_threshold();
void TenuredSpace::update_for_block(HeapWord* start, HeapWord* end) {
_offsets.update_for_block(start, end);
}
void TenuredSpace::alloc_block(HeapWord* start, HeapWord* end) {
_offsets.alloc_block(start, end);
HeapWord* TenuredSpace::block_start_const(const void* addr) const {
HeapWord* cur_block = _offsets.block_start_reaching_into_card(addr);
while (true) {
HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
if (next_block > addr) {
assert(cur_block <= addr, "postcondition");
return cur_block;
}
cur_block = next_block;
// Because the BOT is precise, we should never step into the next card
// (i.e. crossing the card boundary).
assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
}
}
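// For example, if the BOT maps addr's card back to a block starting at
// B0 and objects B0, B1, B2 follow contiguously, the loop above walks
// B0 -> B1 -> B2 and returns the first block whose end lies past addr.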
TenuredSpace::TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
TenuredSpace::TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
_offsets(sharedOffsetArray, mr),
_par_alloc_lock(Mutex::safepoint, "TenuredSpaceParAlloc_lock", true)
_offsets(sharedOffsetArray)
{
_offsets.set_contig_space(this);
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}
@@ -536,10 +524,6 @@ void TenuredSpace::verify() const {
int objs = 0;
int blocks = 0;
if (VerifyObjectStartArray) {
_offsets.verify();
}
while (p < top()) {
size_t size = cast_to_oop(p)->size();
// For a sampling of objects in the space, find it using the

@@ -47,11 +47,6 @@
// Forward decls.
class Space;
class ContiguousSpace;
#if INCLUDE_SERIALGC
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class BlockOffsetTable;
#endif
class Generation;
class ContiguousSpace;
class CardTableRS;
@@ -241,7 +236,7 @@ private:
// This the function to invoke when an allocation of an object covering
// "start" to "end" occurs to update other internal data structures.
virtual void alloc_block(HeapWord* start, HeapWord* the_end) { }
virtual void update_for_block(HeapWord* start, HeapWord* the_end) { }
GenSpaceMangler* mangler() { return _mangler; }
@@ -308,11 +303,6 @@ private:
// live part of a compacted space ("deadwood" support.)
virtual size_t allowed_dead_ratio() const { return 0; };
// Some contiguous spaces may maintain some data structures that should
// be updated whenever an allocation crosses a boundary. This function
// initializes these data structures for further updates.
virtual void initialize_threshold() { }
// "q" is an object of the given "size" that should be forwarded;
// "cp" names the generation ("gen") and containing "this" (which must
// also equal "cp->space"). "compact_top" is where in "this" the
@@ -322,7 +312,7 @@ private:
// be one, since compaction must succeed -- we go to the first space of
// the previous generation if necessary, updating "cp"), reset compact_top
// and then forward. In either case, returns the new value of "compact_top".
// Invokes the "alloc_block" function of the then-current compaction
// Invokes the "update_for_block" function of the then-current compaction
// space.
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
HeapWord* compact_top);
@@ -412,36 +402,28 @@ private:
#if INCLUDE_SERIALGC
// Class TenuredSpace is used by TenuredGeneration; it supports an efficient
// "block_start" operation via a BlockOffsetArray (whose BlockOffsetSharedArray
// may be shared with other spaces.)
// "block_start" operation via a SerialBlockOffsetTable.
class TenuredSpace: public ContiguousSpace {
friend class VMStructs;
protected:
BlockOffsetArrayContigSpace _offsets;
Mutex _par_alloc_lock;
SerialBlockOffsetTable _offsets;
// Mark sweep support
size_t allowed_dead_ratio() const override;
public:
// Constructor
TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr);
void set_bottom(HeapWord* value) override;
void set_end(HeapWord* value) override;
void clear(bool mangle_space) override;
inline HeapWord* block_start_const(const void* p) const override;
HeapWord* block_start_const(const void* addr) const override;
// Add offset table update.
inline HeapWord* allocate(size_t word_size) override;
inline HeapWord* par_allocate(size_t word_size) override;
// MarkSweep support phase3
void initialize_threshold() override;
void alloc_block(HeapWord* start, HeapWord* end) override;
void update_for_block(HeapWord* start, HeapWord* end) override;
void print_on(outputStream* st) const override;

@@ -47,36 +47,19 @@ inline HeapWord* Space::block_start(const void* p) {
inline HeapWord* TenuredSpace::allocate(size_t size) {
HeapWord* res = ContiguousSpace::allocate(size);
if (res != nullptr) {
_offsets.alloc_block(res, size);
_offsets.update_for_block(res, res + size);
}
return res;
}
// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* TenuredSpace::par_allocate(size_t size) {
MutexLocker x(&_par_alloc_lock);
// This ought to be just "allocate", because of the lock above, but that
// ContiguousSpace::allocate asserts that either the allocating thread
// holds the heap lock or it is the VM thread and we're at a safepoint.
// The best I (dld) could figure was to put a field in ContiguousSpace
// meaning "locking at safepoint taken care of", and set/reset that
// here. But this will do for now, especially in light of the comment
// above. Perhaps in the future we can find some lock-free way of
// keeping the coordination.
HeapWord* res = ContiguousSpace::par_allocate(size);
if (res != nullptr) {
_offsets.alloc_block(res, size);
_offsets.update_for_block(res, res + size);
}
return res;
}
inline HeapWord*
TenuredSpace::block_start_const(const void* p) const {
return _offsets.block_start(p);
}
class DeadSpacer : StackObj {
size_t _allowed_deadspace_words;
bool _active;

@@ -91,9 +91,6 @@ class TaskTerminator : public CHeapObj<mtGC> {
size_t tasks_in_queue_set() const;
// Perform one iteration of spin-master work.
void do_delay_step(DelayContext& delay_context);
NONCOPYABLE(TaskTerminator);
public:

@@ -23,6 +23,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/compilerThread.hpp"
#include "gc/shared/collectedHeap.hpp"
@@ -53,6 +54,29 @@ volatile intx JVMCI::_fatal_log_init_thread = -1;
volatile int JVMCI::_fatal_log_fd = -1;
const char* JVMCI::_fatal_log_filename = nullptr;
CompilerThreadCanCallJava::CompilerThreadCanCallJava(JavaThread* current, bool new_state) {
_current = nullptr;
if (current->is_Compiler_thread()) {
CompilerThread* ct = CompilerThread::cast(current);
if (ct->_can_call_java != new_state &&
ct->_compiler != nullptr &&
ct->_compiler->is_jvmci())
{
// Only enter a new context if the ability of the
// current thread to call Java actually changes
_reset_state = ct->_can_call_java;
ct->_can_call_java = new_state;
_current = ct;
}
}
}
CompilerThreadCanCallJava::~CompilerThreadCanCallJava() {
if (_current != nullptr) {
_current->_can_call_java = _reset_state;
}
}
void jvmci_vmStructs_init() NOT_DEBUG_RETURN;
bool JVMCI::can_initialize_JVMCI() {
@@ -176,6 +200,10 @@ void JVMCI::ensure_box_caches_initialized(TRAPS) {
java_lang_Long_LongCache::symbol()
};
// Class resolution and initialization below
// requires calling into Java
CompilerThreadCanCallJava ccj(THREAD, true);
for (unsigned i = 0; i < sizeof(box_classes) / sizeof(Symbol*); i++) {
Klass* k = SystemDictionary::resolve_or_fail(box_classes[i], true, CHECK);
InstanceKlass* ik = InstanceKlass::cast(k);

@@ -29,6 +29,7 @@
#include "utilities/exceptions.hpp"
class BoolObjectClosure;
class CompilerThread;
class constantPoolHandle;
class JavaThread;
class JVMCIEnv;
@@ -46,6 +47,34 @@ typedef FormatStringEventLog<256> StringEventLog;
struct _jmetadata;
typedef struct _jmetadata *jmetadata;
// A stack object that manages a scope in which the current thread, if
// it's a CompilerThread, can have its CompilerThread::_can_call_java
// field changed. This allows restricting libjvmci better in terms
// of when it can make Java calls. If a Java call on a CompilerThread
// reaches a clinit, there's a risk of deadlock when async compilation
// is disabled (e.g. -Xbatch or -Xcomp) as the non-CompilerThread thread
// waiting for the blocking compilation may hold the clinit lock.
//
// This scope is primarily used to disable Java calls when libjvmci enters
// the VM via a C2V (i.e. CompilerToVM) native method.
class CompilerThreadCanCallJava : StackObj {
private:
CompilerThread* _current; // Only non-null if state of thread changed
bool _reset_state; // Value prior to state change, undefined
// if no state change.
public:
// Enters a scope in which the ability of the current CompilerThread
// to call Java is specified by `new_state`. This call only makes a
// change if the current thread is a CompilerThread associated with
// a JVMCI compiler whose CompilerThread::_can_call_java is not
// currently `new_state`.
CompilerThreadCanCallJava(JavaThread* current, bool new_state);
// Resets CompilerThread::_can_call_java of the current thread if the
// constructor changed it.
~CompilerThreadCanCallJava();
};
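// A typical (hypothetical) use as a scoped guard:
//
//   void enter_vm_from_libjvmci(JavaThread* thread) {
//     CompilerThreadCanCallJava ccj(thread, /*new_state*/ false);
//     // ... work that must not call back into Java ...
//   } // destructor restores the previous _can_call_java value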
class JVMCI : public AllStatic {
friend class JVMCIRuntime;
friend class JVMCIEnv;

@@ -165,14 +165,19 @@ Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) {
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
ThreadInVMfromNative __tiv(thread); \
HandleMarkCleaner __hm(thread); \
JavaThread* THREAD = thread; \
JavaThread* THREAD = thread; \
debug_only(VMNativeEntryWrapper __vew;)
// Native method block that transitions current thread to '_thread_in_vm'.
#define C2V_BLOCK(result_type, name, signature) \
JVMCI_VM_ENTRY_MARK; \
ResourceMark rm; \
JVMCIENV_FROM_JNI(JVMCI::compilation_tick(thread), env);
// Note: CompilerThreadCanCallJava must precede JVMCIENV_FROM_JNI so that
// the translation of an uncaught exception in the JVMCIEnv does not make
// a Java call when __is_hotspot == false.
#define C2V_BLOCK(result_type, name, signature) \
JVMCI_VM_ENTRY_MARK; \
ResourceMark rm; \
bool __is_hotspot = env == thread->jni_environment(); \
CompilerThreadCanCallJava ccj(thread, __is_hotspot); \
JVMCIENV_FROM_JNI(JVMCI::compilation_tick(thread), env); \
static JavaThread* get_current_thread(bool allow_null=true) {
Thread* thread = Thread::current_or_null_safe();
@@ -188,7 +193,7 @@ static JavaThread* get_current_thread(bool allow_null=true) {
#define C2V_VMENTRY(result_type, name, signature) \
JNIEXPORT result_type JNICALL c2v_ ## name signature { \
JavaThread* thread = get_current_thread(); \
if (thread == nullptr) { \
if (thread == nullptr) { \
env->ThrowNew(JNIJVMCI::InternalError::clazz(), \
err_msg("Cannot call into HotSpot from JVMCI shared library without attaching current thread")); \
return; \
@@ -199,7 +204,7 @@ static JavaThread* get_current_thread(bool allow_null=true) {
#define C2V_VMENTRY_(result_type, name, signature, result) \
JNIEXPORT result_type JNICALL c2v_ ## name signature { \
JavaThread* thread = get_current_thread(); \
if (thread == nullptr) { \
if (thread == nullptr) { \
env->ThrowNew(JNIJVMCI::InternalError::clazz(), \
err_msg("Cannot call into HotSpot from JVMCI shared library without attaching current thread")); \
return result; \
@@ -221,7 +226,7 @@ static JavaThread* get_current_thread(bool allow_null=true) {
#define JNI_THROW(caller, name, msg) do { \
jint __throw_res = env->ThrowNew(JNIJVMCI::name::clazz(), msg); \
if (__throw_res != JNI_OK) { \
tty->print_cr("Throwing " #name " in " caller " returned %d", __throw_res); \
JVMCI_event_1("Throwing " #name " in " caller " returned %d", __throw_res); \
} \
return; \
} while (0);
@@ -229,7 +234,7 @@ static JavaThread* get_current_thread(bool allow_null=true) {
#define JNI_THROW_(caller, name, msg, result) do { \
jint __throw_res = env->ThrowNew(JNIJVMCI::name::clazz(), msg); \
if (__throw_res != JNI_OK) { \
tty->print_cr("Throwing " #name " in " caller " returned %d", __throw_res); \
JVMCI_event_1("Throwing " #name " in " caller " returned %d", __throw_res); \
} \
return result; \
} while (0)
@@ -579,6 +584,7 @@ C2V_VMENTRY_0(jboolean, shouldInlineMethod,(JNIEnv* env, jobject, ARGUMENT_PAIR(
C2V_END
C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGUMENT_PAIR(accessing_klass), jint accessing_klass_loader, jboolean resolve))
CompilerThreadCanCallJava canCallJava(thread, resolve); // Resolution requires Java calls
JVMCIObject name = JVMCIENV->wrap(jname);
const char* str = JVMCIENV->as_utf8_string(name);
TempNewSymbol class_name = SymbolTable::new_symbol(str);
@@ -592,7 +598,7 @@ C2V_VMENTRY_NULL(jobject, lookupType, (JNIEnv* env, jobject, jstring jname, ARGU
if (val != nullptr) {
if (strstr(val, "<trace>") != nullptr) {
tty->print_cr("CompilerToVM.lookupType: %s", str);
} else if (strstr(val, str) != nullptr) {
} else if (strstr(str, val) != nullptr) {
THROW_MSG_0(vmSymbols::java_lang_Exception(),
err_msg("lookupTypeException: %s", str));
}
@@ -938,6 +944,17 @@ C2V_VMENTRY_NULL(jobject, resolveFieldInPool, (JNIEnv* env, jobject, ARGUMENT_PA
Bytecodes::Code code = (Bytecodes::Code)(((int) opcode) & 0xFF);
fieldDescriptor fd;
methodHandle mh(THREAD, UNPACK_PAIR(Method, method));
Bytecodes::Code bc = (Bytecodes::Code) (((int) opcode) & 0xFF);
int holder_index = cp->klass_ref_index_at(index, bc);
if (!cp->tag_at(holder_index).is_klass() && !THREAD->can_call_java()) {
// If the holder is not resolved in the constant pool and the current
// thread cannot call Java, return null. This avoids a Java call
// in LinkInfo to load the holder.
Symbol* klass_name = cp->klass_ref_at_noresolve(index, bc);
return nullptr;
}
LinkInfo link_info(cp, index, mh, code, CHECK_NULL);
LinkResolver::resolve_field(fd, link_info, Bytecodes::java_code(code), false, CHECK_NULL);
JVMCIPrimitiveArray info = JVMCIENV->wrap(info_handle);
@@ -2726,6 +2743,7 @@ C2V_VMENTRY_0(jlong, translate, (JNIEnv* env, jobject, jobject obj_handle, jbool
return 0L;
}
PEER_JVMCIENV_FROM_THREAD(THREAD, !JVMCIENV->is_hotspot());
CompilerThreadCanCallJava canCallJava(thread, PEER_JVMCIENV->is_hotspot());
PEER_JVMCIENV->check_init(JVMCI_CHECK_0);
JVMCIEnv* thisEnv = JVMCIENV;
@@ -2779,11 +2797,11 @@ C2V_VMENTRY_0(jlong, translate, (JNIEnv* env, jobject, jobject obj_handle, jbool
} else {
// Link the new HotSpotNmethod to the nmethod
PEER_JVMCIENV->initialize_installed_code(result, nm, JVMCI_CHECK_0);
// Only HotSpotNmethod instances in the HotSpot heap are tracked directly by the runtime.
if (PEER_JVMCIENV->is_hotspot()) {
// Only non-default HotSpotNmethod instances in the HotSpot heap are tracked directly by the runtime.
if (!isDefault && PEER_JVMCIENV->is_hotspot()) {
JVMCINMethodData* data = nm->jvmci_nmethod_data();
if (data == nullptr) {
JVMCI_THROW_MSG_0(IllegalArgumentException, "Cannot set HotSpotNmethod mirror for default nmethod");
JVMCI_THROW_MSG_0(IllegalArgumentException, "Missing HotSpotNmethod data");
}
if (data->get_nmethod_mirror(nm, /* phantom_ref */ false) != nullptr) {
JVMCI_THROW_MSG_0(IllegalArgumentException, "Cannot overwrite existing HotSpotNmethod mirror for nmethod");
@@ -2945,18 +2963,21 @@ static jbyteArray get_encoded_annotation_data(InstanceKlass* holder, AnnotationA
C2V_VMENTRY_NULL(jbyteArray, getEncodedClassAnnotationData, (JNIEnv* env, jobject, ARGUMENT_PAIR(klass),
jobject filter, jint filter_length, jlong filter_klass_pointers))
CompilerThreadCanCallJava canCallJava(thread, true); // Requires Java support
InstanceKlass* holder = InstanceKlass::cast(UNPACK_PAIR(Klass, klass));
return get_encoded_annotation_data(holder, holder->class_annotations(), true, filter_length, filter_klass_pointers, THREAD, JVMCIENV);
C2V_END
C2V_VMENTRY_NULL(jbyteArray, getEncodedExecutableAnnotationData, (JNIEnv* env, jobject, ARGUMENT_PAIR(method),
jobject filter, jint filter_length, jlong filter_klass_pointers))
CompilerThreadCanCallJava canCallJava(thread, true); // Requires Java support
methodHandle method(THREAD, UNPACK_PAIR(Method, method));
return get_encoded_annotation_data(method->method_holder(), method->annotations(), false, filter_length, filter_klass_pointers, THREAD, JVMCIENV);
C2V_END
C2V_VMENTRY_NULL(jbyteArray, getEncodedFieldAnnotationData, (JNIEnv* env, jobject, ARGUMENT_PAIR(klass), jint index,
jobject filter, jint filter_length, jlong filter_klass_pointers))
CompilerThreadCanCallJava canCallJava(thread, true); // Requires Java support
InstanceKlass* holder = check_field(InstanceKlass::cast(UNPACK_PAIR(Klass, klass)), index, JVMCIENV);
fieldDescriptor fd(holder, index);
return get_encoded_annotation_data(holder, fd.annotations(), false, filter_length, filter_klass_pointers, THREAD, JVMCIENV);
@@ -3013,6 +3034,7 @@ C2V_VMENTRY_0(jboolean, addFailedSpeculation, (JNIEnv* env, jobject, jlong faile
C2V_END
C2V_VMENTRY(void, callSystemExit, (JNIEnv* env, jobject, jint status))
CompilerThreadCanCallJava canCallJava(thread, true);
JavaValue result(T_VOID);
JavaCallArguments jargs(1);
jargs.push_int(status);

@@ -448,6 +448,15 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation {
private:
const Handle& _throwable;
char* print_throwable_to_buffer(Handle throwable, jlong buffer, int buffer_size) {
char* char_buffer = (char*) buffer + 4;
stringStream st(char_buffer, (size_t) buffer_size - 4);
java_lang_Throwable::print_stack_trace(throwable, &st);
u4 len = (u4) st.size();
*((u4*) buffer) = len;
return char_buffer;
}
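// The buffer written above is length-prefixed: a u4 character count in
// the first four bytes, then the printed stack trace itself, presumably
// so the receiving side can decode it without scanning for a terminator.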
bool handle_pending_exception(JavaThread* THREAD, jlong buffer, int buffer_size) {
if (HAS_PENDING_EXCEPTION) {
Handle throwable = Handle(THREAD, PENDING_EXCEPTION);
@@ -457,11 +466,7 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation {
JVMCI_event_1("error translating exception: OutOfMemoryError");
decode(THREAD, _encode_oome_fail, 0L);
} else {
char* char_buffer = (char*) buffer + 4;
stringStream st(char_buffer, (size_t) buffer_size - 4);
java_lang_Throwable::print_stack_trace(throwable, &st);
u4 len = (u4) st.size();
*((u4*) buffer) = len;
char* char_buffer = print_throwable_to_buffer(throwable, buffer, buffer_size);
JVMCI_event_1("error translating exception: %s", char_buffer);
decode(THREAD, _encode_fail, buffer);
}
@@ -471,6 +476,13 @@ class HotSpotToSharedLibraryExceptionTranslation : public ExceptionTranslation {
}
int encode(JavaThread* THREAD, jlong buffer, int buffer_size) {
if (!THREAD->can_call_java()) {
char* char_buffer = print_throwable_to_buffer(_throwable, buffer, buffer_size);
const char* detail = log_is_enabled(Info, exceptions) ? "" : " (-Xlog:exceptions may give more detail)";
JVMCI_event_1("cannot call Java to translate exception%s: %s", detail, char_buffer);
decode(THREAD, _encode_fail, buffer);
return 0;
}
Klass* vmSupport = SystemDictionary::resolve_or_fail(vmSymbols::jdk_internal_vm_VMSupport(), true, THREAD);
if (handle_pending_exception(THREAD, buffer, buffer_size)) {
return 0;
@@ -1311,6 +1323,7 @@ JVMCIObject JVMCIEnv::get_jvmci_type(const JVMCIKlassHandle& klass, JVMCI_TRAPS)
JavaThread* THREAD = JVMCI::compilation_tick(JavaThread::current()); // For exception macros.
jboolean exception = false;
if (is_hotspot()) {
CompilerThreadCanCallJava ccj(THREAD, true);
JavaValue result(T_OBJECT);
JavaCallArguments args;
args.push_long(pointer);

@@ -824,10 +824,10 @@ oop JVMCINMethodData::get_nmethod_mirror(nmethod* nm, bool phantom_ref) {
}
void JVMCINMethodData::set_nmethod_mirror(nmethod* nm, oop new_mirror) {
assert(_nmethod_mirror_index != -1, "cannot set JVMCI mirror for nmethod");
guarantee(_nmethod_mirror_index != -1, "cannot set JVMCI mirror for nmethod");
oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
assert(new_mirror != nullptr, "use clear_nmethod_mirror to clear the mirror");
assert(*addr == nullptr, "cannot overwrite non-null mirror");
guarantee(new_mirror != nullptr, "use clear_nmethod_mirror to clear the mirror");
guarantee(*addr == nullptr, "cannot overwrite non-null mirror");
*addr = new_mirror;
@@ -1819,57 +1819,6 @@ Klass* JVMCIRuntime::get_klass_by_index(const constantPoolHandle& cpool,
return result;
}
// ------------------------------------------------------------------
// Implementation of get_field_by_index.
//
// Implementation note: the results of field lookups are cached
// in the accessor klass.
void JVMCIRuntime::get_field_by_index_impl(InstanceKlass* klass, fieldDescriptor& field_desc,
int index, Bytecodes::Code bc) {
JVMCI_EXCEPTION_CONTEXT;
assert(klass->is_linked(), "must be linked before using its constant-pool");
constantPoolHandle cpool(thread, klass->constants());
// Get the field's name, signature, and type.
Symbol* name = cpool->name_ref_at(index, bc);
int nt_index = cpool->name_and_type_ref_index_at(index, bc);
int sig_index = cpool->signature_ref_index_at(nt_index);
Symbol* signature = cpool->symbol_at(sig_index);
// Get the field's declared holder.
int holder_index = cpool->klass_ref_index_at(index, bc);
bool holder_is_accessible;
Klass* declared_holder = get_klass_by_index(cpool, holder_index,
holder_is_accessible,
klass);
// The declared holder of this field may not have been loaded.
// Bail out with partial field information.
if (!holder_is_accessible) {
return;
}
// Perform the field lookup.
Klass* canonical_holder =
InstanceKlass::cast(declared_holder)->find_field(name, signature, &field_desc);
if (canonical_holder == nullptr) {
return;
}
assert(canonical_holder == field_desc.field_holder(), "just checking");
}
// ------------------------------------------------------------------
// Get a field by index from a klass's constant pool.
void JVMCIRuntime::get_field_by_index(InstanceKlass* accessor, fieldDescriptor& fd, int index, Bytecodes::Code bc) {
ResourceMark rm;
return get_field_by_index_impl(accessor, fd, index, bc);
}
// ------------------------------------------------------------------
// Perform an appropriate method lookup based on accessor, holder,
// name, signature, and bytecode.

@@ -231,8 +231,6 @@ class JVMCIRuntime: public CHeapObj<mtJVMCI> {
int klass_index,
bool& is_accessible,
Klass* loading_klass);
static void get_field_by_index_impl(InstanceKlass* loading_klass, fieldDescriptor& fd,
int field_index, Bytecodes::Code bc);
static Method* get_method_by_index_impl(const constantPoolHandle& cpool,
int method_index, Bytecodes::Code bc,
InstanceKlass* loading_klass);
@@ -417,8 +415,6 @@ class JVMCIRuntime: public CHeapObj<mtJVMCI> {
int klass_index,
bool& is_accessible,
Klass* loading_klass);
static void get_field_by_index(InstanceKlass* loading_klass, fieldDescriptor& fd,
int field_index, Bytecodes::Code bc);
static Method* get_method_by_index(const constantPoolHandle& cpool,
int method_index, Bytecodes::Code bc,
InstanceKlass* loading_klass);

@@ -29,6 +29,7 @@
#include "classfile/javaClasses.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/disassembler.hpp"
@@ -661,6 +662,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
_vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr),
_late_inlines_pos(0),
_number_of_mh_late_inlines(0),
_oom(false),
_print_inlining_stream(new (mtCompiler) stringStream()),
_print_inlining_list(nullptr),
_print_inlining_idx(0),
@@ -938,6 +940,7 @@ Compile::Compile( ciEnv* ci_env,
_types(nullptr),
_node_hash(nullptr),
_number_of_mh_late_inlines(0),
_oom(false),
_print_inlining_stream(new (mtCompiler) stringStream()),
_print_inlining_list(nullptr),
_print_inlining_idx(0),
@@ -5261,3 +5264,6 @@ Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGV
return result;
}
void Compile::record_method_not_compilable_oom() {
record_method_not_compilable(CompilationMemoryStatistic::failure_reason_memlimit());
}

@@ -460,6 +460,9 @@ private:
int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
// "MemLimit" directive was specified and the memory limit was hit during compilation
bool _oom;
// Inlining may not happen in parse order which would make
// PrintInlining output confusing. Keep track of PrintInlining
// pieces in order.
@@ -503,6 +506,8 @@ private:
void log_late_inline_failure(CallGenerator* cg, const char* msg);
DEBUG_ONLY(bool _exception_backedge;)
void record_method_not_compilable_oom();
public:
void* barrier_set_state() const { return _barrier_set_state; }
@@ -814,6 +819,10 @@ private:
record_failure(reason);
}
bool check_node_count(uint margin, const char* reason) {
if (oom()) {
record_method_not_compilable_oom();
return true;
}
if (live_nodes() + margin > max_node_limit()) {
record_method_not_compilable(reason);
return true;
@@ -821,6 +830,8 @@ private:
return false;
}
}
bool oom() const { return _oom; }
void set_oom() { _oom = true; }
// Node management
uint unique() const { return _unique; }
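
A minimal standalone sketch of the bail-out pattern these hunks add; the names and limits below are invented for illustration, not HotSpot code. The idea from JDK-8318016 is that the allocation layer sets a flag when the per-compilation memory ceiling is hit, and the compiler polls that flag at its existing node-count checkpoints instead of failing deep inside the allocator:

#include <cstdio>

struct Compilation {
  bool oom = false;             // set by the allocation layer when the MemLimit ceiling is hit
  unsigned live_nodes = 0;
  unsigned max_nodes = 80000;

  bool check_node_count(unsigned margin, const char* reason) {
    if (oom) {                  // ceiling reached: record a memlimit failure and bail out
      std::puts("not compilable: memory limit exceeded");
      return true;
    }
    if (live_nodes + margin > max_nodes) {
      std::printf("not compilable: %s\n", reason);
      return true;
    }
    return false;
  }
};

int main() {
  Compilation c;
  c.oom = true;                 // simulate the arena hitting the ceiling
  return c.check_node_count(100, "node limit") ? 0 : 1;
}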

View File

@@ -103,6 +103,7 @@
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/utf8.hpp"
#include "utilities/zipLibrary.hpp"
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
@@ -3413,8 +3414,7 @@ JVM_END
// Library support ///////////////////////////////////////////////////////////////////////////
JVM_LEAF(void*, JVM_LoadZipLibrary())
ClassLoader::load_zip_library_if_needed();
return ClassLoader::zip_library_handle();
return ZipLibrary::handle();
JVM_END
JVM_ENTRY_NO_ENV(void*, JVM_LoadLibrary(const char* name, jboolean throwException))

View File

@@ -79,7 +79,7 @@ JavaThread* UpcallLinker::on_entry(UpcallStub::FrameData* context, jobject recei
guarantee(thread->thread_state() == _thread_in_native, "wrong thread state for upcall");
context->thread = thread;
assert(thread->can_call_java(), "must be able to call Java");
guarantee(thread->can_call_java(), "must be able to call Java");
// Allocate handle block for Java code. This must be done before we change thread_state to _thread_in_Java,
// since it can potentially block.

View File

@@ -115,7 +115,6 @@ Monitor* Notification_lock = nullptr;
Monitor* PeriodicTask_lock = nullptr;
Monitor* RedefineClasses_lock = nullptr;
Mutex* Verify_lock = nullptr;
Monitor* Zip_lock = nullptr;
#if INCLUDE_JFR
Mutex* JfrStacktrace_lock = nullptr;
@@ -324,7 +323,6 @@ void mutex_init() {
MUTEX_DEFN(ScratchObjects_lock , PaddedMutex , nosafepoint-1); // Holds DumpTimeTable_lock
#endif // INCLUDE_CDS
MUTEX_DEFN(Bootclasspath_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(Zip_lock , PaddedMonitor, nosafepoint-1); // Holds DumpTimeTable_lock
#if INCLUDE_JVMCI
// JVMCIRuntime::_lock must be acquired before JVMCI_lock to avoid deadlock

View File

@@ -111,7 +111,6 @@ extern Monitor* Notification_lock; // a lock used for notification
extern Monitor* PeriodicTask_lock; // protects the periodic task structure
extern Monitor* RedefineClasses_lock; // locks classes from parallel redefinition
extern Mutex* Verify_lock; // synchronize initialization of verify library
extern Monitor* Zip_lock; // synchronize initialization of zip library
extern Monitor* ThreadsSMRDelete_lock; // Used by ThreadsSMRSupport to take pressure off the Threads_lock
extern Mutex* ThreadIdTableCreate_lock; // Used by ThreadIdTable to lazily create the thread id table
extern Mutex* SharedDecoder_lock; // serializes access to the decoder during normal (not error reporting) use

View File

@@ -25,11 +25,9 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "runtime/arguments.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "services/heapDumperCompression.hpp"
#include "utilities/zipLibrary.hpp"
char const* FileWriter::open_writer() {
@@ -62,74 +60,28 @@ char const* FileWriter::write_buf(char* buf, ssize_t size) {
return nullptr;
}
typedef char const* (*GzipInitFunc)(size_t, size_t*, size_t*, int);
typedef size_t(*GzipCompressFunc)(char*, size_t, char*, size_t, char*, size_t,
int, char*, char const**);
static GzipInitFunc gzip_init_func;
static GzipCompressFunc gzip_compress_func;
void* GZipCompressor::load_gzip_func(char const* name) {
char path[JVM_MAXPATHLEN];
char ebuf[1024];
void* handle;
MutexLocker locker(Zip_lock, Monitor::_no_safepoint_check_flag);
if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
handle = os::dll_load(path, ebuf, sizeof ebuf);
if (handle != nullptr) {
return os::dll_lookup(handle, name);
}
}
return nullptr;
}
char const* GZipCompressor::init(size_t block_size, size_t* needed_out_size,
size_t* needed_tmp_size) {
_block_size = block_size;
_is_first = true;
if (gzip_compress_func == nullptr) {
gzip_compress_func = (GzipCompressFunc) load_gzip_func("ZIP_GZip_Fully");
if (gzip_compress_func == nullptr) {
return "Cannot get ZIP_GZip_Fully function";
}
}
if (gzip_init_func == nullptr) {
gzip_init_func = (GzipInitFunc) load_gzip_func("ZIP_GZip_InitParams");
if (gzip_init_func == nullptr) {
return "Cannot get ZIP_GZip_InitParams function";
}
}
char const* result = gzip_init_func(block_size, needed_out_size,
needed_tmp_size, _level);
char const* result = ZipLibrary::init_params(block_size, needed_out_size,
needed_tmp_size, _level);
*needed_out_size += 1024; // Add extra space for the comment in the first chunk.
return result;
}
char const* GZipCompressor::compress(char* in, size_t in_size, char* out, size_t out_size,
char* tmp, size_t tmp_size, size_t* compressed_size) {
char const* msg = nullptr;
if (_is_first) {
char buf[128];
// Write the block size as a comment in the first gzip chunk, so the code
// that later reads the dump can choose good buffer sizes.
jio_snprintf(buf, sizeof(buf), "HPROF BLOCKSIZE=" SIZE_FORMAT, _block_size);
*compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level,
buf, &msg);
*compressed_size = ZipLibrary::compress(in, in_size, out, out_size, tmp, tmp_size, _level, buf, &msg);
_is_first = false;
} else {
*compressed_size = gzip_compress_func(in, in_size, out, out_size, tmp, tmp_size, _level,
nullptr, &msg);
*compressed_size = ZipLibrary::compress(in, in_size, out, out_size, tmp, tmp_size, _level, nullptr, &msg);
}
return msg;

View File

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2020 SAP SE. All rights reserved.
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -90,8 +91,6 @@ private:
size_t _block_size;
bool _is_first;
void* load_gzip_func(char const* name);
public:
GZipCompressor(int level) : _level(level), _block_size(0), _is_first(false) {
}

View File

@@ -250,7 +250,8 @@ enum VMErrorType : unsigned int {
OOM_MALLOC_ERROR = 0xe0000001,
OOM_MMAP_ERROR = 0xe0000002,
OOM_MPROTECT_ERROR = 0xe0000003,
OOM_JAVA_HEAP_FATAL = 0xe0000004
OOM_JAVA_HEAP_FATAL = 0xe0000004,
OOM_HOTSPOT_ARENA = 0xe0000005
};
// error reporting helper functions

View File

@@ -144,7 +144,7 @@ class VMError : public AllStatic {
static jlong get_step_start_time();
static void clear_step_start_time();
WINDOWS_ONLY(ATTRIBUTE_NORETURN static void raise_fail_fast(void* exrecord, void* context);)
WINDOWS_ONLY([[noreturn]] static void raise_fail_fast(void* exrecord, void* context);)
public:
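
For context, [[noreturn]] is the standard C++11 attribute replacing the toolchain-specific ATTRIBUTE_NORETURN macro. A minimal sketch of what it buys:

#include <cstdlib>

// Declares that control never returns, so the compiler can drop dead code
// after call sites and suppress spurious "missing return" warnings.
[[noreturn]] static void die(int code) { std::exit(code); }

int main() { die(0); }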

View File

@@ -0,0 +1,197 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jvm_io.h"
#include "runtime/arguments.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/zipLibrary.hpp"
// Entry points in zip.dll for loading zip/jar file entries
typedef void**(*ZIP_Open_t)(const char* name, char** pmsg);
typedef void(*ZIP_Close_t)(jzfile* zip);
typedef jzentry* (*ZIP_FindEntry_t)(jzfile* zip, const char* name, jint* sizeP, jint* nameLen);
typedef jboolean(*ZIP_ReadEntry_t)(jzfile* zip, jzentry* entry, unsigned char* buf, char* namebuf);
typedef jint(*ZIP_CRC32_t)(jint crc, const jbyte* buf, jint len);
typedef const char* (*ZIP_GZip_InitParams_t)(size_t, size_t*, size_t*, int);
typedef size_t(*ZIP_GZip_Fully_t)(char*, size_t, char*, size_t, char*, size_t, int, char*, char const**);
static ZIP_Open_t ZIP_Open = nullptr;
static ZIP_Close_t ZIP_Close = nullptr;
static ZIP_FindEntry_t ZIP_FindEntry = nullptr;
static ZIP_ReadEntry_t ZIP_ReadEntry = nullptr;
static ZIP_CRC32_t ZIP_CRC32 = nullptr;
static ZIP_GZip_InitParams_t ZIP_GZip_InitParams = nullptr;
static ZIP_GZip_Fully_t ZIP_GZip_Fully = nullptr;
static void* _zip_handle = nullptr;
static bool _loaded = false;
static inline bool is_loaded() {
return Atomic::load_acquire(&_loaded);
}
static inline bool not_loaded() {
return !is_loaded();
}
static void* dll_lookup(const char* name, const char* path, bool vm_exit_on_failure) {
assert(_zip_handle != nullptr, "invariant");
void* func = os::dll_lookup(_zip_handle, name);
if (func == nullptr && vm_exit_on_failure) {
char msg[256] = "";
jio_snprintf(&msg[0], sizeof msg, "Could not resolve \"%s\"", name);
vm_exit_during_initialization(&msg[0], path);
}
return func;
}
static void store_function_pointers(const char* path, bool vm_exit_on_failure) {
assert(_zip_handle != nullptr, "invariant");
ZIP_Open = CAST_TO_FN_PTR(ZIP_Open_t, dll_lookup("ZIP_Open", path, vm_exit_on_failure));
ZIP_Close = CAST_TO_FN_PTR(ZIP_Close_t, dll_lookup("ZIP_Close", path, vm_exit_on_failure));
ZIP_FindEntry = CAST_TO_FN_PTR(ZIP_FindEntry_t, dll_lookup("ZIP_FindEntry", path, vm_exit_on_failure));
ZIP_ReadEntry = CAST_TO_FN_PTR(ZIP_ReadEntry_t, dll_lookup("ZIP_ReadEntry", path, vm_exit_on_failure));
ZIP_CRC32 = CAST_TO_FN_PTR(ZIP_CRC32_t, dll_lookup("ZIP_CRC32", path, vm_exit_on_failure));
// The following entry points are most likely optional from a zip library implementation perspective.
// Hence no vm_exit on a resolution failure. Further refactorings should investigate this,
// and if possible, streamline setting all entry points consistently.
ZIP_GZip_InitParams = CAST_TO_FN_PTR(ZIP_GZip_InitParams_t, dll_lookup("ZIP_GZip_InitParams", path, false));
ZIP_GZip_Fully = CAST_TO_FN_PTR(ZIP_GZip_Fully_t, dll_lookup("ZIP_GZip_Fully", path, false));
}
static void load_zip_library(bool vm_exit_on_failure) {
assert(!is_loaded(), "should not load zip library twice");
char path[JVM_MAXPATHLEN];
if (os::dll_locate_lib(&path[0], sizeof path, Arguments::get_dll_dir(), "zip")) {
char ebuf[1024];
_zip_handle = os::dll_load(&path[0], &ebuf[0], sizeof ebuf);
}
if (_zip_handle == nullptr) {
if (vm_exit_on_failure) {
vm_exit_during_initialization("Unable to load zip library", &path[0]);
}
return;
}
store_function_pointers(&path[0], vm_exit_on_failure);
Atomic::release_store(&_loaded, true);
assert(is_loaded(), "invariant");
}
//
// Helper mutex class that also ensures that Java threads
// are in _thread_in_native when loading the zip library.
//
class ZipLibraryLoaderLock : public StackObj {
private:
static Semaphore _lock;
JavaThread* _jt;
public:
ZipLibraryLoaderLock() : _jt(nullptr) {
Thread* thread = Thread::current_or_null();
if (thread != nullptr && thread->is_Java_thread()) {
JavaThread* const jt = JavaThread::cast(thread);
if (jt->thread_state() != _thread_in_native) {
_jt = jt;
ThreadStateTransition::transition_from_vm(jt, _thread_in_native, false);
}
}
_lock.wait();
}
~ZipLibraryLoaderLock() {
_lock.signal();
if (_jt != nullptr) {
ThreadStateTransition::transition_from_native(_jt, _thread_in_vm, false);
}
}
};
Semaphore ZipLibraryLoaderLock::_lock(1);
static void initialize(bool vm_exit_on_failure = true) {
if (is_loaded()) {
return;
}
ZipLibraryLoaderLock lock;
if (not_loaded()) {
load_zip_library(vm_exit_on_failure);
}
}
void** ZipLibrary::open(const char* name, char** pmsg) {
initialize();
assert(ZIP_Open != nullptr, "invariant");
return ZIP_Open(name, pmsg);
}
void ZipLibrary::close(jzfile* zip) {
assert(is_loaded(), "invariant");
assert(ZIP_Close != nullptr, "invariant");
ZIP_Close(zip);
}
jzentry* ZipLibrary::find_entry(jzfile* zip, const char* name, jint* sizeP, jint* nameLen) {
initialize();
assert(ZIP_FindEntry != nullptr, "invariant");
return ZIP_FindEntry(zip, name, sizeP, nameLen);
}
jboolean ZipLibrary::read_entry(jzfile* zip, jzentry* entry, unsigned char* buf, char* namebuf) {
initialize();
assert(ZIP_ReadEntry != nullptr, "invariant");
return ZIP_ReadEntry(zip, entry, buf, namebuf);
}
jint ZipLibrary::crc32(jint crc, const jbyte* buf, jint len) {
initialize();
assert(ZIP_CRC32 != nullptr, "invariant");
return ZIP_CRC32(crc, buf, len);
}
const char* ZipLibrary::init_params(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size, int level) {
initialize(false);
if (ZIP_GZip_InitParams == nullptr) {
return "Cannot get ZIP_GZip_InitParams function";
}
return ZIP_GZip_InitParams(block_size, needed_out_size, needed_tmp_size, level);
}
size_t ZipLibrary::compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp, size_t tmp_size, int level, char* buf, const char** pmsg) {
initialize(false);
if (ZIP_GZip_Fully == nullptr) {
*pmsg = "Cannot get ZIP_GZip_Fully function";
return 0;
}
return ZIP_GZip_Fully(in, in_size, out, out_size, tmp, tmp_size, level, buf, pmsg);
}
void* ZipLibrary::handle() {
initialize();
assert(is_loaded(), "invariant");
assert(_zip_handle != nullptr, "invariant");
return _zip_handle;
}
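
The loading protocol above is double-checked initialization. A minimal sketch in standard C++, assuming nothing HotSpot-specific: Atomic::load_acquire/release_store become std::atomic operations, the Semaphore-backed ZipLibraryLoaderLock becomes a mutex, and the _thread_in_native transition has no standard equivalent, so it is omitted:

#include <atomic>
#include <mutex>

static std::atomic<bool> loaded{false};
static std::mutex loader_lock;

static void load_library() { /* dll_load + resolve entry points */ }

static void initialize() {
  if (loaded.load(std::memory_order_acquire)) return;  // fast path: already published
  std::lock_guard<std::mutex> guard(loader_lock);      // serialize concurrent loaders
  if (!loaded.load(std::memory_order_relaxed)) {       // re-check under the lock
    load_library();
    loaded.store(true, std::memory_order_release);     // publish the resolved pointers
  }
}

int main() { initialize(); }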

View File

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_UTILITIES_ZIPLIBRARY_HPP
#define SHARE_UTILITIES_ZIPLIBRARY_HPP
#include "memory/allocation.hpp"
// Type definitions for zip file and zip file entry
typedef void* jzfile;
typedef struct {
char* name; /* entry name */
jlong time; /* modification time */
jlong size; /* size of uncompressed data */
jlong csize; /* size of compressed data (zero if uncompressed) */
jint crc; /* crc of uncompressed data */
char* comment; /* optional zip file comment */
jbyte* extra; /* optional extra data */
jlong pos; /* position of LOC header (if negative) or data */
} jzentry;
class ZipLibrary : AllStatic {
public:
static void** open(const char* name, char** pmsg);
static void close(jzfile* zip);
static jzentry* find_entry(jzfile* zip, const char* name, jint* sizeP, jint* nameLen);
static jboolean read_entry(jzfile* zip, jzentry* entry, unsigned char* buf, char* namebuf);
static jint crc32(jint crc, const jbyte* buf, jint len);
static const char* init_params(size_t block_size, size_t* needed_out_size, size_t* needed_tmp_size, int level);
static size_t compress(char* in, size_t in_size, char* out, size_t out_size, char* tmp, size_t tmp_size, int level, char* buf, const char** pmsg);
static void* handle();
};
#endif // SHARE_UTILITIES_ZIPLIBRARY_HPP
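
A hedged usage sketch of the new facade (HotSpot-internal, so not compilable standalone; the jar path and entry name are placeholders, and entry cleanup is elided). Callers no longer load zip.dll or resolve symbols themselves; ZipLibrary does both lazily on first use, which is what lets Zip_lock be removed:

#include "utilities/zipLibrary.hpp"

static bool has_manifest(const char* jar_path) {
  char* msg = nullptr;
  jzfile* zip = ZipLibrary::open(jar_path, &msg);  // jzfile is void*, so jzfile* matches the returned void**
  if (zip == nullptr) return false;                // msg, if set, describes the failure
  jint size = 0, name_len = 0;
  jzentry* entry = ZipLibrary::find_entry(zip, "META-INF/MANIFEST.MF", &size, &name_len);
  ZipLibrary::close(zip);
  return entry != nullptr;
}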

View File

@@ -787,7 +787,8 @@ public final class ScopedValue<T> {
// Bindings can be in one of four states:
//
// 1: class Thread: this is a new Thread instance, and no
// scoped values have ever been bound in this Thread.
// scoped values have ever been bound in this Thread, and neither
// have any scoped value bindings been inherited from a parent.
// 2: EmptySnapshot.SINGLETON: This is effectively an empty binding.
// 3: A Snapshot instance: this contains one or more scoped value
// bindings.
@@ -798,18 +799,18 @@ public final class ScopedValue<T> {
Object bindings = Thread.scopedValueBindings();
if (bindings == NEW_THREAD_BINDINGS) {
// This must be a new thread
return Snapshot.EMPTY_SNAPSHOT;
return Snapshot.EMPTY_SNAPSHOT;
}
if (bindings == null) {
// Search the stack
bindings = Thread.findScopedValueBindings();
if (bindings == null) {
// Nothing on the stack.
if (bindings == NEW_THREAD_BINDINGS || bindings == null) {
// We've walked the stack without finding anything.
bindings = Snapshot.EMPTY_SNAPSHOT;
}
Thread.setScopedValueBindings(bindings);
}
assert (bindings != null);
Thread.setScopedValueBindings(bindings);
return (Snapshot) bindings;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -130,12 +130,16 @@ final class CertificateMessage {
byte[] encodedCert = Record.getBytes24(m);
listLen -= (3 + encodedCert.length);
encodedCerts.add(encodedCert);
if (encodedCerts.size() > SSLConfiguration.maxCertificateChainLength) {
int maxAllowedChainLength = handshakeContext.sslConfig.isClientMode ?
SSLConfiguration.maxInboundServerCertChainLen :
SSLConfiguration.maxInboundClientCertChainLen;
if (encodedCerts.size() > maxAllowedChainLength) {
throw new SSLProtocolException(
"The certificate chain length ("
+ encodedCerts.size()
+ ") exceeds the maximum allowed length ("
+ SSLConfiguration.maxCertificateChainLength
+ maxAllowedChainLength
+ ")");
}
@@ -861,12 +865,16 @@ final class CertificateMessage {
SSLExtensions extensions =
new SSLExtensions(this, m, enabledExtensions);
certList.add(new CertificateEntry(encodedCert, extensions));
if (certList.size() > SSLConfiguration.maxCertificateChainLength) {
int maxAllowedChainLength = handshakeContext.sslConfig.isClientMode ?
SSLConfiguration.maxInboundServerCertChainLen :
SSLConfiguration.maxInboundClientCertChainLen;
if (certList.size() > maxAllowedChainLength) {
throw new SSLProtocolException(
"The certificate chain length ("
+ certList.size()
+ ") exceeds the maximum allowed length ("
+ SSLConfiguration.maxCertificateChainLength
+ maxAllowedChainLength
+ ")");
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -108,9 +108,11 @@ final class SSLConfiguration implements Cloneable {
static final int maxHandshakeMessageSize = GetIntegerAction.privilegedGetProperty(
"jdk.tls.maxHandshakeMessageSize", 32768);
// Set the max certificate chain length to 10
static final int maxCertificateChainLength = GetIntegerAction.privilegedGetProperty(
"jdk.tls.maxCertificateChainLength", 10);
// Limit the certificate chain length accepted from clients
static final int maxInboundClientCertChainLen;
// Limit the certificate chain length accepted from servers
static final int maxInboundServerCertChainLen;
// To switch off the supported_groups extension for DHE cipher suite.
static final boolean enableFFDHE =
@@ -133,6 +135,55 @@ final class SSLConfiguration implements Cloneable {
useExtendedMasterSecret = supportExtendedMasterSecret;
}
static {
boolean globalPropSet = false;
// jdk.tls.maxCertificateChainLength property has no default
Integer maxCertificateChainLength = GetIntegerAction.privilegedGetProperty(
"jdk.tls.maxCertificateChainLength");
if (maxCertificateChainLength != null && maxCertificateChainLength >= 0) {
globalPropSet = true;
}
/*
* If either jdk.tls.server.maxInboundCertificateChainLength or
* jdk.tls.client.maxInboundCertificateChainLength is set, it will
* override jdk.tls.maxCertificateChainLength, regardless of whether
* jdk.tls.maxCertificateChainLength is set or not.
* If neither jdk.tls.server.maxInboundCertificateChainLength nor
* jdk.tls.client.maxInboundCertificateChainLength is set, the behavior
* depends on the setting of jdk.tls.maxCertificateChainLength. If
* jdk.tls.maxCertificateChainLength is set, it falls back to that
* value; otherwise, it defaults to 8 for
* jdk.tls.server.maxInboundCertificateChainLength
* and 10 for jdk.tls.client.maxInboundCertificateChainLength.
* Users can independently set either
* jdk.tls.server.maxInboundCertificateChainLength or
* jdk.tls.client.maxInboundCertificateChainLength.
*/
Integer inboundClientLen = GetIntegerAction.privilegedGetProperty(
"jdk.tls.server.maxInboundCertificateChainLength");
// Default for jdk.tls.server.maxInboundCertificateChainLength is 8
if (inboundClientLen == null || inboundClientLen < 0) {
maxInboundClientCertChainLen = globalPropSet ?
maxCertificateChainLength : 8;
} else {
maxInboundClientCertChainLen = inboundClientLen;
}
Integer inboundServerLen = GetIntegerAction.privilegedGetProperty(
"jdk.tls.client.maxInboundCertificateChainLength");
// Default for jdk.tls.client.maxInboundCertificateChainLength is 10
if (inboundServerLen == null || inboundServerLen < 0) {
maxInboundServerCertChainLen = globalPropSet ?
maxCertificateChainLength : 10;
} else {
maxInboundServerCertChainLen = inboundServerLen;
}
}
SSLConfiguration(SSLContextImpl sslContext, boolean isClientMode) {
// Configurations with SSLParameters, default values.

View File

@@ -81,6 +81,7 @@ public class Debug {
System.err.println("logincontext login context results");
System.err.println("jca JCA engine class debugging");
System.err.println("keystore KeyStore debugging");
System.err.println("pcsc Smartcard library debugging");
System.err.println("policy loading and granting");
System.err.println("provider security provider debugging");
System.err.println("pkcs11 PKCS11 session manager debugging");

View File

@@ -1,9 +1,7 @@
## Harfbuzz v7.2.0
## Harfbuzz v8.2.2
### Harfbuzz License
https://github.com/harfbuzz/harfbuzz/blob/7.2.0/COPYING
<pre>
HarfBuzz is licensed under the so-called "Old MIT" license. Details follow.
@@ -14,6 +12,7 @@ Copyright © 2010-2023 Google, Inc.
Copyright © 2018-2020 Ebrahim Byagowi
Copyright © 2004-2013 Red Hat, Inc.
Copyright © 2019 Facebook, Inc.
Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
Copyright © 2007 Chris Wilson
Copyright © 2018-2019 Adobe Inc.
Copyright © 2006-2023 Behdad Esfahbod
@@ -72,6 +71,15 @@ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
---------------------------------
The below license applies to the following files:
libharfbuzz/hb-unicode-emoji-table.hh
© 2023 Unicode®, Inc.
Unicode and the Unicode Logo are registered trademarks of Unicode, Inc.
in the U.S. and other countries.
For terms of use, see https://www.unicode.org/terms_of_use.html
</pre>
### AUTHORS File Information

View File

@@ -397,7 +397,6 @@ struct IndexSubtableRecord
TRACE_SERIALIZE (this);
auto *subtable = c->serializer->start_embed<IndexSubtable> ();
if (unlikely (!subtable)) return_trace (false);
if (unlikely (!c->serializer->extend_min (subtable))) return_trace (false);
auto *old_subtable = get_subtable (base);
@@ -545,7 +544,8 @@ struct IndexSubtableArray
const IndexSubtableRecord*>> *lookup /* OUT */) const
{
bool start_glyph_is_set = false;
for (hb_codepoint_t new_gid = 0; new_gid < c->plan->num_output_glyphs (); new_gid++)
unsigned num_glyphs = c->plan->num_output_glyphs ();
for (hb_codepoint_t new_gid = 0; new_gid < num_glyphs; new_gid++)
{
hb_codepoint_t old_gid;
if (unlikely (!c->plan->old_gid_for_new_gid (new_gid, &old_gid))) continue;
@@ -576,9 +576,6 @@ struct IndexSubtableArray
{
TRACE_SUBSET (this);
auto *dst = c->serializer->start_embed<IndexSubtableArray> ();
if (unlikely (!dst)) return_trace (false);
hb_vector_t<hb_pair_t<hb_codepoint_t, const IndexSubtableRecord*>> lookup;
build_lookup (c, bitmap_size_context, &lookup);
if (unlikely (!c->serializer->propagate_error (lookup)))
@@ -993,12 +990,10 @@ CBLC::subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *cblc_prime = c->serializer->start_embed<CBLC> ();
// Use a vector as a secondary buffer as the tables need to be built in parallel.
hb_vector_t<char> cbdt_prime;
if (unlikely (!cblc_prime)) return_trace (false);
auto *cblc_prime = c->serializer->start_embed<CBLC> ();
if (unlikely (!c->serializer->extend_min (cblc_prime))) return_trace (false);
cblc_prime->version = version;

View File

@@ -53,6 +53,7 @@ struct Paint;
struct hb_paint_context_t :
hb_dispatch_context_t<hb_paint_context_t>
{
const char *get_name () { return "PAINT"; }
template <typename T>
return_t dispatch (const T &obj) { obj.paint_glyph (this); return hb_empty_t (); }
static return_t default_return_value () { return hb_empty_t (); }
@@ -68,6 +69,8 @@ public:
unsigned int palette_index;
hb_color_t foreground;
VarStoreInstancer &instancer;
hb_map_t current_glyphs;
hb_map_t current_layers;
int depth_left = HB_MAX_NESTING_LEVEL;
int edge_count = HB_COLRV1_MAX_EDGE_COUNT;
@@ -261,6 +264,7 @@ struct Variable
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
value.paint_glyph (c, varIdxBase);
}
@@ -281,7 +285,7 @@ struct Variable
public:
VarIdx varIdxBase;
public:
DEFINE_SIZE_STATIC (4 + T::static_size);
DEFINE_SIZE_MIN (VarIdx::static_size + T::min_size);
};
template <typename T>
@@ -315,6 +319,7 @@ struct NoVariable
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
value.paint_glyph (c, varIdxBase);
}
@@ -332,7 +337,7 @@ struct NoVariable
T value;
public:
DEFINE_SIZE_STATIC (T::static_size);
DEFINE_SIZE_MIN (T::min_size);
};
// Color structures
@@ -409,7 +414,6 @@ struct ColorLine
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (this);
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
if (!c->serializer->check_assign (out->extend, extend, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
@@ -559,6 +563,7 @@ struct Affine2x3
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
c->funcs->push_transform (c->data,
xx.to_float (c->instancer (varIdxBase, 0)),
yx.to_float (c->instancer (varIdxBase, 1)),
@@ -640,6 +645,7 @@ struct PaintSolid
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_bool_t is_foreground;
hb_color_t color;
@@ -694,6 +700,7 @@ struct PaintLinearGradient
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_color_line_t cl = {
(void *) &(this+colorLine),
(this+colorLine).static_get_color_stops, c,
@@ -760,6 +767,7 @@ struct PaintRadialGradient
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_color_line_t cl = {
(void *) &(this+colorLine),
(this+colorLine).static_get_color_stops, c,
@@ -824,6 +832,7 @@ struct PaintSweepGradient
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
hb_color_line_t cl = {
(void *) &(this+colorLine),
(this+colorLine).static_get_color_stops, c,
@@ -875,6 +884,7 @@ struct PaintGlyph
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
c->funcs->push_inverse_root_transform (c->data, c->font);
c->funcs->push_clip_glyph (c->data, gid, c->font);
c->funcs->push_root_transform (c->data, c->font);
@@ -947,6 +957,7 @@ struct PaintTransform
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
(this+transform).paint_glyph (c);
c->recurse (this+src);
c->funcs->pop_transform (c->data);
@@ -991,6 +1002,7 @@ struct PaintTranslate
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float ddx = dx + c->instancer (varIdxBase, 0);
float ddy = dy + c->instancer (varIdxBase, 1);
@@ -1039,6 +1051,7 @@ struct PaintScale
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = scaleX.to_float (c->instancer (varIdxBase, 0));
float sy = scaleY.to_float (c->instancer (varIdxBase, 1));
@@ -1089,6 +1102,7 @@ struct PaintScaleAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = scaleX.to_float (c->instancer (varIdxBase, 0));
float sy = scaleY.to_float (c->instancer (varIdxBase, 1));
float tCenterX = centerX + c->instancer (varIdxBase, 2);
@@ -1142,6 +1156,7 @@ struct PaintScaleUniform
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float s = scale.to_float (c->instancer (varIdxBase, 0));
bool p1 = c->funcs->push_scale (c->data, s, s);
@@ -1189,6 +1204,7 @@ struct PaintScaleUniformAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float s = scale.to_float (c->instancer (varIdxBase, 0));
float tCenterX = centerX + c->instancer (varIdxBase, 1);
float tCenterY = centerY + c->instancer (varIdxBase, 2);
@@ -1240,6 +1256,7 @@ struct PaintRotate
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float a = angle.to_float (c->instancer (varIdxBase, 0));
bool p1 = c->funcs->push_rotate (c->data, a);
@@ -1287,6 +1304,7 @@ struct PaintRotateAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float a = angle.to_float (c->instancer (varIdxBase, 0));
float tCenterX = centerX + c->instancer (varIdxBase, 1);
float tCenterY = centerY + c->instancer (varIdxBase, 2);
@@ -1341,6 +1359,7 @@ struct PaintSkew
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = xSkewAngle.to_float(c->instancer (varIdxBase, 0));
float sy = ySkewAngle.to_float(c->instancer (varIdxBase, 1));
@@ -1391,6 +1410,7 @@ struct PaintSkewAroundCenter
void paint_glyph (hb_paint_context_t *c, uint32_t varIdxBase) const
{
TRACE_PAINT (this);
float sx = xSkewAngle.to_float(c->instancer (varIdxBase, 0));
float sy = ySkewAngle.to_float(c->instancer (varIdxBase, 1));
float tCenterX = centerX + c->instancer (varIdxBase, 2);
@@ -1426,20 +1446,24 @@ struct PaintComposite
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
if (!out->src.serialize_subset (c, src, this, instancer)) return_trace (false);
return_trace (out->backdrop.serialize_subset (c, backdrop, this, instancer));
bool ret = false;
ret |= out->src.serialize_subset (c, src, this, instancer);
ret |= out->backdrop.serialize_subset (c, backdrop, this, instancer);
return_trace (ret);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
c->check_ops (this->min_size) && // PaintComposite can get exponential
src.sanitize (c, this) &&
backdrop.sanitize (c, this));
}
void paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
c->recurse (this+backdrop);
c->funcs->push_group (c->data);
c->recurse (this+src);
@@ -1514,10 +1538,10 @@ struct ClipBoxFormat2 : Variable<ClipBoxFormat1>
value.get_clip_box(clip_box, instancer);
if (instancer)
{
clip_box.xMin += _hb_roundf (instancer (varIdxBase, 0));
clip_box.yMin += _hb_roundf (instancer (varIdxBase, 1));
clip_box.xMax += _hb_roundf (instancer (varIdxBase, 2));
clip_box.yMax += _hb_roundf (instancer (varIdxBase, 3));
clip_box.xMin += roundf (instancer (varIdxBase, 0));
clip_box.yMin += roundf (instancer (varIdxBase, 1));
clip_box.xMax += roundf (instancer (varIdxBase, 2));
clip_box.yMax += roundf (instancer (varIdxBase, 3));
}
}
};
@@ -1898,15 +1922,16 @@ struct LayerList : Array32OfOffset32To<Paint>
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
bool ret = false;
for (const auto& _ : + hb_enumerate (*this)
| hb_filter (c->plan->colrv1_layers, hb_first))
{
auto *o = out->serialize_append (c->serializer);
if (unlikely (!o) || !o->serialize_subset (c, _.second, this, instancer))
return_trace (false);
if (unlikely (!o)) return_trace (false);
ret |= o->serialize_subset (c, _.second, this, instancer);
}
return_trace (true);
return_trace (ret);
}
bool sanitize (hb_sanitize_context_t *c) const
@@ -2167,7 +2192,7 @@ struct COLR
if (version == 0 && (!base_it || !layer_it))
return_trace (false);
COLR *colr_prime = c->serializer->start_embed<COLR> ();
auto *colr_prime = c->serializer->start_embed<COLR> ();
if (unlikely (!c->serializer->extend_min (colr_prime))) return_trace (false);
if (version == 0)
@@ -2284,6 +2309,7 @@ struct COLR
&(this+varIdxMap),
hb_array (font->coords, font->num_coords));
hb_paint_context_t c (this, funcs, data, font, palette_index, foreground, instancer);
c.current_glyphs.add (glyph);
if (version == 1)
{
@@ -2399,18 +2425,42 @@ hb_paint_context_t::recurse (const Paint &paint)
void PaintColrLayers::paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
const LayerList &paint_offset_lists = c->get_colr_table ()->get_layerList ();
for (unsigned i = firstLayerIndex; i < firstLayerIndex + numLayers; i++)
{
if (unlikely (c->current_layers.has (i)))
continue;
c->current_layers.add (i);
const Paint &paint = paint_offset_lists.get_paint (i);
c->funcs->push_group (c->data);
c->recurse (paint);
c->funcs->pop_group (c->data, HB_PAINT_COMPOSITE_MODE_SRC_OVER);
c->current_layers.del (i);
}
}
void PaintColrGlyph::paint_glyph (hb_paint_context_t *c) const
{
TRACE_PAINT (this);
if (unlikely (c->current_glyphs.has (gid)))
return;
c->current_glyphs.add (gid);
c->funcs->push_inverse_root_transform (c->data, c->font);
if (c->funcs->color_glyph (c->data, gid, c->font))
{
c->funcs->pop_transform (c->data);
c->current_glyphs.del (gid);
return;
}
c->funcs->pop_transform (c->data);
const COLR *colr_table = c->get_colr_table ();
const Paint *paint = colr_table->get_base_glyph_paint (gid);
@@ -2429,6 +2479,8 @@ void PaintColrGlyph::paint_glyph (hb_paint_context_t *c) const
if (has_clip_box)
c->funcs->pop_clip (c->data);
c->current_glyphs.del (gid);
}
} /* namespace OT */
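
The current_glyphs/current_layers additions above guard recursion against cyclic paint graphs in malformed fonts. A minimal standalone sketch of the pattern, with illustrative names rather than HarfBuzz API:

#include <cstdio>
#include <set>
#include <vector>

struct Node { std::vector<int> children; };

static void paint(int id, const std::vector<Node>& graph, std::set<int>& active) {
  if (active.count(id)) return;      // cycle: this node is already on the paint stack
  active.insert(id);
  std::printf("paint %d\n", id);
  for (int child : graph[id].children)
    paint(child, graph, active);
  active.erase(id);                  // done: siblings may legitimately reuse this node
}

int main() {
  std::vector<Node> graph = {{{1}}, {{0}}};  // 0 -> 1 -> 0 is a cycle
  std::set<int> active;
  paint(0, graph, active);                   // paints each node once, then stops
}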

View File

@@ -48,7 +48,6 @@ struct SBIXGlyph
{
TRACE_SERIALIZE (this);
SBIXGlyph* new_glyph = c->start_embed<SBIXGlyph> ();
if (unlikely (!new_glyph)) return_trace (nullptr);
if (unlikely (!c->extend_min (new_glyph))) return_trace (nullptr);
new_glyph->xOffset = xOffset;
@@ -143,7 +142,6 @@ struct SBIXStrike
unsigned int num_output_glyphs = c->plan->num_output_glyphs ();
auto* out = c->serializer->start_embed<SBIXStrike> ();
if (unlikely (!out)) return_trace (false);
auto snap = c->serializer->snapshot ();
if (unlikely (!c->serializer->extend (out, num_output_glyphs + 1))) return_trace (false);
out->ppem = ppem;
@@ -388,7 +386,6 @@ struct sbix
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed<Array32OfOffset32To<SBIXStrike>> ();
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
hb_vector_t<Offset32To<SBIXStrike>*> new_strikes;
@@ -423,8 +420,6 @@ struct sbix
{
TRACE_SUBSET (this);
sbix *sbix_prime = c->serializer->start_embed<sbix> ();
if (unlikely (!sbix_prime)) return_trace (false);
if (unlikely (!c->serializer->embed (this->version))) return_trace (false);
if (unlikely (!c->serializer->embed (this->flags))) return_trace (false);

View File

@@ -57,6 +57,9 @@ struct Coverage
public:
DEFINE_SIZE_UNION (2, format);
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
@@ -113,22 +116,33 @@ struct Coverage
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
unsigned count = 0;
unsigned count = hb_len (glyphs);
unsigned num_ranges = 0;
hb_codepoint_t last = (hb_codepoint_t) -2;
hb_codepoint_t max = 0;
bool unsorted = false;
for (auto g: glyphs)
{
if (last != (hb_codepoint_t) -2 && g < last)
unsorted = true;
if (last + 1 != g)
num_ranges++;
last = g;
count++;
if (g > max) max = g;
}
u.format = count <= num_ranges * 3 ? 1 : 2;
u.format = !unsorted && count <= num_ranges * 3 ? 1 : 2;
#ifndef HB_NO_BEYOND_64K
if (count && last > 0xFFFFu)
if (max > 0xFFFFu)
u.format += 2;
if (unlikely (max > 0xFFFFFFu))
#else
if (unlikely (max > 0xFFFFu))
#endif
{
c->check_success (false, HB_SERIALIZE_ERROR_INT_OVERFLOW);
return_trace (false);
}
switch (u.format)
{
@@ -148,8 +162,8 @@ struct Coverage
auto it =
+ iter ()
| hb_take (c->plan->source->get_num_glyphs ())
| hb_filter (c->plan->glyph_map_gsub)
| hb_map_retains_sorting (c->plan->glyph_map_gsub)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
// Cache the iterator result as it will be iterated multiple times
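
A small standalone illustration of the serialize heuristic above, re-implemented with invented names: format 1 stores one glyph id per covered glyph, format 2 stores three fields per contiguous range, so format 1 is chosen only for sorted input with count <= 3 * num_ranges:

#include <cstdio>
#include <vector>

static int choose_format(const std::vector<unsigned>& glyphs) {
  unsigned num_ranges = 0, last = (unsigned) -2;
  bool unsorted = false;
  for (unsigned g : glyphs) {
    if (last != (unsigned) -2 && g < last) unsorted = true;  // out of order
    if (last + 1 != g) num_ranges++;                         // a new contiguous range starts
    last = g;
  }
  return (!unsorted && glyphs.size() <= num_ranges * 3u) ? 1 : 2;
}

int main() {
  std::printf("%d\n", choose_format({1, 5, 9}));        // 3 glyphs, 3 ranges -> format 1
  std::printf("%d\n", choose_format({1, 2, 3, 4, 5}));  // 5 glyphs, 1 range  -> format 2
}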

View File

@@ -79,7 +79,7 @@ struct CoverageFormat1_3
{
if (glyphArray.len > glyphs->get_population () * hb_bit_storage ((unsigned) glyphArray.len) / 2)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
for (auto g : *glyphs)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;

View File

@@ -95,19 +95,26 @@ struct CoverageFormat2_4
unsigned count = 0;
unsigned range = (unsigned) -1;
last = (hb_codepoint_t) -2;
unsigned unsorted = false;
for (auto g: glyphs)
{
if (last + 1 != g)
{
if (unlikely (last != (hb_codepoint_t) -2 && last + 1 > g))
unsorted = true;
range++;
rangeRecord[range].first = g;
rangeRecord[range].value = count;
rangeRecord.arrayZ[range].first = g;
rangeRecord.arrayZ[range].value = count;
}
rangeRecord[range].last = g;
rangeRecord.arrayZ[range].last = g;
last = g;
count++;
}
if (unlikely (unsorted))
rangeRecord.as_array ().qsort (RangeRecord<Types>::cmp_range);
return_trace (true);
}
@@ -115,7 +122,7 @@ struct CoverageFormat2_4
{
if (rangeRecord.len > glyphs->get_population () * hb_bit_storage ((unsigned) rangeRecord.len) / 2)
{
for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
for (auto g : *glyphs)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
@@ -185,8 +192,8 @@ struct CoverageFormat2_4
if (__more__ ())
{
unsigned int old = coverage;
j = c->rangeRecord[i].first;
coverage = c->rangeRecord[i].value;
j = c->rangeRecord.arrayZ[i].first;
coverage = c->rangeRecord.arrayZ[i].value;
if (unlikely (coverage != old + 1))
{
/* Broken table. Skip. Important to avoid DoS.

View File

@@ -51,6 +51,18 @@ struct RangeRecord
int cmp (hb_codepoint_t g) const
{ return g < first ? -1 : g <= last ? 0 : +1; }
HB_INTERNAL static int cmp_range (const void *pa, const void *pb) {
const RangeRecord *a = (const RangeRecord *) pa;
const RangeRecord *b = (const RangeRecord *) pb;
if (a->first < b->first) return -1;
if (a->first > b->first) return +1;
if (a->last < b->last) return -1;
if (a->last > b->last) return +1;
if (a->value < b->value) return -1;
if (a->value > b->value) return +1;
return 0;
}
unsigned get_population () const
{
if (unlikely (last < first)) return 0;

View File

@@ -29,9 +29,10 @@
#ifndef OT_LAYOUT_GDEF_GDEF_HH
#define OT_LAYOUT_GDEF_GDEF_HH
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-var-common.hh"
#include "../../../hb-font.hh"
#include "../../../hb-cache.hh"
namespace OT {
@@ -48,8 +49,6 @@ struct AttachPoint : Array16Of<HBUINT16>
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
return_trace (out->serialize (c->serializer, + iter ()));
}
};
@@ -201,22 +200,23 @@ struct CaretValueFormat3
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
if (!c->serializer->embed (caretValueFormat)) return_trace (false);
if (!c->serializer->embed (coordinate)) return_trace (false);
unsigned varidx = (this+deviceTable).get_variation_index ();
if (c->plan->layout_variation_idx_delta_map.has (varidx))
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (varidx, &new_varidx_delta))
return_trace (false);
uint32_t new_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (varidx));
if (delta != 0)
{
if (!c->serializer->check_assign (out->coordinate, coordinate + delta, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
if (!c->serializer->check_assign (out->coordinate, coordinate + delta, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
if (c->plan->all_axes_pinned)
if (new_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
return_trace (c->serializer->check_assign (out->caretValueFormat, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (deviceTable))
@@ -441,6 +441,16 @@ struct MarkGlyphSetsFormat1
bool covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{ return (this+coverage[set_index]).get_coverage (glyph_id) != NOT_COVERED; }
template <typename set_t>
void collect_coverage (hb_vector_t<set_t> &sets) const
{
for (const auto &offset : coverage)
{
const auto &cov = this+offset;
cov.collect_coverage (sets.push ());
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@@ -494,6 +504,15 @@ struct MarkGlyphSets
}
}
template <typename set_t>
void collect_coverage (hb_vector_t<set_t> &sets) const
{
switch (u.format) {
case 1: u.format1.collect_coverage (sets); return;
default:return;
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@@ -585,6 +604,26 @@ struct GDEFVersion1_2
(version.to_int () < 0x00010003u || varStore.sanitize (c, this)));
}
static void remap_varidx_after_instantiation (const hb_map_t& varidx_map,
hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>>& layout_variation_idx_delta_map /* IN/OUT */)
{
/* If varidx_map is empty, the varstore is empty after instantiation
* (no variations): map every varidx to HB_OT_LAYOUT_NO_VARIATIONS_INDEX.
* If varidx_map has no entry for an original varidx, that delta row is all
* zeros: map it to HB_OT_LAYOUT_NO_VARIATIONS_INDEX as well. */
for (auto _ : layout_variation_idx_delta_map.iter_ref ())
{
/* old_varidx->(varidx, delta) mapping generated for subsetting, then this
* varidx is used as key of varidx_map during instantiation */
uint32_t varidx = _.second.first;
uint32_t *new_varidx;
if (varidx_map.has (varidx, &new_varidx))
_.second.first = *new_varidx;
else
_.second.first = HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
@@ -607,6 +646,22 @@ struct GDEFVersion1_2
{
if (c->plan->all_axes_pinned)
out->varStore = 0;
else if (c->plan->normalized_coords)
{
if (varStore)
{
item_variations_t item_vars;
if (item_vars.instantiate (this+varStore, c->plan, true, true,
c->plan->gdef_varstore_inner_maps.as_array ()))
subset_varstore = out->varStore.serialize_serialize (c->serializer,
item_vars.has_long_word (),
c->plan->axis_tags,
item_vars.get_region_list (),
item_vars.get_vardata_encodings ());
remap_varidx_after_instantiation (item_vars.get_varidx_map (),
c->plan->layout_variation_idx_delta_map);
}
}
else
subset_varstore = out->varStore.serialize_subset (c, varStore, this, c->plan->gdef_varstore_inner_maps.as_array ());
}
@@ -858,27 +913,79 @@ struct GDEF
hb_blob_destroy (table.get_blob ());
table = hb_blob_get_empty ();
}
#ifndef HB_NO_GDEF_CACHE
table->get_mark_glyph_sets ().collect_coverage (mark_glyph_set_digests);
#endif
}
~accelerator_t () { table.destroy (); }
unsigned int get_glyph_props (hb_codepoint_t glyph) const
{
unsigned v;
#ifndef HB_NO_GDEF_CACHE
if (glyph_props_cache.get (glyph, &v))
return v;
#endif
v = table->get_glyph_props (glyph);
#ifndef HB_NO_GDEF_CACHE
if (likely (table.get_blob ())) // Don't try setting if we are the null instance!
glyph_props_cache.set (glyph, v);
#endif
return v;
}
bool mark_set_covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{
return
#ifndef HB_NO_GDEF_CACHE
mark_glyph_set_digests[set_index].may_have (glyph_id) &&
#endif
table->mark_set_covers (set_index, glyph_id);
}
hb_blob_ptr_t<GDEF> table;
#ifndef HB_NO_GDEF_CACHE
hb_vector_t<hb_set_digest_t> mark_glyph_set_digests;
mutable hb_cache_t<21, 3, 8> glyph_props_cache;
#endif
};
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{ get_lig_caret_list ().collect_variation_indices (c); }
void remap_layout_variation_indices (const hb_set_t *layout_variation_indices,
const hb_vector_t<int>& normalized_coords,
bool calculate_delta, /* not pinned at default */
bool no_variations, /* all axes pinned */
hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map /* OUT */) const
{
if (!has_var_store ()) return;
if (layout_variation_indices->is_empty ()) return;
const VariationStore &var_store = get_var_store ();
float *store_cache = var_store.create_cache ();
unsigned new_major = 0, new_minor = 0;
unsigned last_major = (layout_variation_indices->get_min ()) >> 16;
for (unsigned idx : layout_variation_indices->iter ())
{
int delta = 0;
if (calculate_delta)
delta = roundf (var_store.get_delta (idx, normalized_coords.arrayZ,
normalized_coords.length, store_cache));
if (no_variations)
{
layout_variation_idx_delta_map->set (idx, hb_pair_t<unsigned, int> (HB_OT_LAYOUT_NO_VARIATIONS_INDEX, delta));
continue;
}
uint16_t major = idx >> 16;
if (major >= get_var_store ().get_sub_table_count ()) break;
if (major >= var_store.get_sub_table_count ()) break;
if (major != last_major)
{
new_minor = 0;
@@ -886,14 +993,11 @@ struct GDEF
}
unsigned new_idx = (new_major << 16) + new_minor;
if (!layout_variation_idx_delta_map->has (idx))
continue;
int delta = hb_second (layout_variation_idx_delta_map->get (idx));
layout_variation_idx_delta_map->set (idx, hb_pair_t<unsigned, int> (new_idx, delta));
++new_minor;
last_major = major;
}
var_store.destroy_cache (store_cache);
}
protected:
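
mark_set_covers now consults a cheap per-set digest before the exact coverage lookup. A minimal standalone sketch of the prefilter idea; a single bitmask stands in for hb_set_digest_t, which layers several such masks:

#include <cstdint>
#include <cstdio>
#include <set>

struct Digest {
  uint64_t mask = 0;
  void add(unsigned g) { mask |= 1ull << (g & 63); }
  bool may_have(unsigned g) const { return (mask >> (g & 63)) & 1; }  // false is definitive, true is a maybe
};

int main() {
  std::set<unsigned> exact = {100, 200, 300};
  Digest d;
  for (unsigned g : exact) d.add(g);
  unsigned probe = 70;
  bool covered = d.may_have(probe) && exact.count(probe);  // exact check runs only on a digest hit
  std::printf("%d\n", covered);
}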

View File

@@ -25,7 +25,9 @@ struct AnchorFormat3
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this));
if (unlikely (!c->check_struct (this))) return_trace (false);
return_trace (xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this));
}
void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED,
@@ -35,9 +37,9 @@ struct AnchorFormat3
*x = font->em_fscale_x (xCoordinate);
*y = font->em_fscale_y (yCoordinate);
if (font->x_ppem || font->num_coords)
if ((font->x_ppem || font->num_coords) && xDeviceTable.sanitize (&c->sanitizer, this))
*x += (this+xDeviceTable).get_x_delta (font, c->var_store, c->var_store_cache);
if (font->y_ppem || font->num_coords)
if ((font->y_ppem || font->num_coords) && yDeviceTable.sanitize (&c->sanitizer, this))
*y += (this+yDeviceTable).get_y_delta (font, c->var_store, c->var_store_cache);
}
@@ -45,15 +47,19 @@ struct AnchorFormat3
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
if (unlikely (!c->serializer->embed (format))) return_trace (false);
if (unlikely (!c->serializer->embed (xCoordinate))) return_trace (false);
if (unlikely (!c->serializer->embed (yCoordinate))) return_trace (false);
unsigned x_varidx = xDeviceTable ? (this+xDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (c->plan->layout_variation_idx_delta_map.has (x_varidx))
if (x_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (x_varidx));
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (x_varidx, &new_varidx_delta))
return_trace (false);
x_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
if (!c->serializer->check_assign (out->xCoordinate, xCoordinate + delta,
@@ -63,9 +69,14 @@ struct AnchorFormat3
}
unsigned y_varidx = yDeviceTable ? (this+yDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (c->plan->layout_variation_idx_delta_map.has (y_varidx))
if (y_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
{
int delta = hb_second (c->plan->layout_variation_idx_delta_map.get (y_varidx));
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (y_varidx, &new_varidx_delta))
return_trace (false);
y_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
if (!c->serializer->check_assign (out->yCoordinate, yCoordinate + delta,
@@ -74,7 +85,10 @@ struct AnchorFormat3
}
}
if (c->plan->all_axes_pinned)
/* If all axes are pinned, or there are no variations left after
* instantiation, both var_idxes will have been mapped to HB_OT_LAYOUT_NO_VARIATIONS_INDEX */
if (x_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX &&
y_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
return_trace (c->serializer->check_assign (out->format, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (xDeviceTable)) return_trace (false);

View File

@@ -21,18 +21,25 @@ struct AnchorMatrix
if (unlikely (hb_unsigned_mul_overflows (rows, cols))) return_trace (false);
unsigned int count = rows * cols;
if (!c->check_array (matrixZ.arrayZ, count)) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
for (unsigned int i = 0; i < count; i++)
if (!matrixZ[i].sanitize (c, this)) return_trace (false);
return_trace (true);
}
const Anchor& get_anchor (unsigned int row, unsigned int col,
const Anchor& get_anchor (hb_ot_apply_context_t *c,
unsigned int row, unsigned int col,
unsigned int cols, bool *found) const
{
*found = false;
if (unlikely (row >= rows || col >= cols)) return Null (Anchor);
*found = !matrixZ[row * cols + col].is_null ();
return this+matrixZ[row * cols + col];
auto &offset = matrixZ[row * cols + col];
if (unlikely (!offset.sanitize (&c->sanitizer, this))) return Null (Anchor);
*found = !offset.is_null ();
return this+offset;
}
template <typename Iterator,
@@ -58,14 +65,15 @@ struct AnchorMatrix
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->rows = num_rows;
bool ret = false;
for (const unsigned i : index_iter)
{
auto *offset = c->serializer->embed (matrixZ[i]);
if (!offset) return_trace (false);
offset->serialize_subset (c, matrixZ[i], this);
ret |= offset->serialize_subset (c, matrixZ[i], this);
}
return_trace (true);
return_trace (ret);
}
};
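
Several subset() methods in these hunks switch from returning on the first failed sub-serialization to OR-ing each result into ret, keeping the table when at least one child serialized. A tiny standalone sketch of that aggregate-and-continue pattern, with invented names:

#include <cstdio>

static bool serialize_child(int i) { return i != 1; }  // pretend child 1 fails

int main() {
  bool ret = false;
  for (int i = 0; i < 3; i++)
    ret |= serialize_child(i);  // keep going and remember any success
  std::printf("%s\n", ret ? "kept" : "dropped");
}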

View File

@@ -24,16 +24,17 @@ struct EntryExitRecord
(src_base+exitAnchor).collect_variation_indices (c);
}
EntryExitRecord* subset (hb_subset_context_t *c,
const void *src_base) const
bool subset (hb_subset_context_t *c,
const void *src_base) const
{
TRACE_SERIALIZE (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (nullptr);
if (unlikely (!out)) return_trace (false);
out->entryAnchor.serialize_subset (c, entryAnchor, src_base);
out->exitAnchor.serialize_subset (c, exitAnchor, src_base);
return_trace (out);
bool ret = false;
ret |= out->entryAnchor.serialize_subset (c, entryAnchor, src_base);
ret |= out->exitAnchor.serialize_subset (c, exitAnchor, src_base);
return_trace (ret);
}
protected:
@@ -91,7 +92,13 @@ struct CursivePosFormat1
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && entryExitRecord.sanitize (c, this));
if (unlikely (!coverage.sanitize (c, this)))
return_trace (false);
if (c->lazy_some_gpos)
return_trace (entryExitRecord.sanitize_shallow (c));
else
return_trace (entryExitRecord.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
@@ -119,19 +126,21 @@ struct CursivePosFormat1
hb_buffer_t *buffer = c->buffer;
const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage (buffer->cur().codepoint)];
if (!this_record.entryAnchor) return_trace (false);
if (!this_record.entryAnchor ||
unlikely (!this_record.entryAnchor.sanitize (&c->sanitizer, this))) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
if (unlikely (!skippy_iter.prev (&unsafe_from)))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
const EntryExitRecord &prev_record = entryExitRecord[(this+coverage).get_coverage (buffer->info[skippy_iter.idx].codepoint)];
if (!prev_record.exitAnchor)
if (!prev_record.exitAnchor ||
unlikely (!prev_record.exitAnchor.sanitize (&c->sanitizer, this)))
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
@@ -200,8 +209,8 @@ struct CursivePosFormat1
* Arabic. */
unsigned int child = i;
unsigned int parent = j;
hb_position_t x_offset = entry_x - exit_x;
hb_position_t y_offset = entry_y - exit_y;
hb_position_t x_offset = roundf (entry_x - exit_x);
hb_position_t y_offset = roundf (entry_y - exit_y);
if (!(c->lookup_props & LookupFlag::RightToLeft))
{
unsigned int k = child;
@@ -278,7 +287,6 @@ struct CursivePosFormat1
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!out)) return_trace (false);
auto it =
+ hb_zip (this+coverage, entryExitRecord)

View File

@@ -156,7 +156,7 @@ GPOS::position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer)
{
for (unsigned i = 0; i < len; i++)
if (unlikely (pos[i].y_offset))
pos[i].x_offset += _hb_roundf (font->slant_xy * pos[i].y_offset);
pos[i].x_offset += roundf (font->slant_xy * pos[i].y_offset);
}
}

View File

@@ -27,6 +27,7 @@ struct LigatureArray : List16OfOffset16To<LigatureAttach>
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
bool ret = false;
for (const auto _ : + hb_zip (coverage, *this)
| hb_filter (glyphset, hb_first))
{
@@ -38,13 +39,13 @@ struct LigatureArray : List16OfOffset16To<LigatureAttach>
+ hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
;
matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
ret |= matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
}
return_trace (this->len);
return_trace (ret);
}
};

View File

@@ -28,7 +28,7 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
const Anchor& mark_anchor = this + record.markAnchor;
bool found;
const Anchor& glyph_anchor = anchors.get_anchor (glyph_index, mark_class, class_count, &found);
const Anchor& glyph_anchor = anchors.get_anchor (c, glyph_index, mark_class, class_count, &found);
/* If this subtable doesn't have an anchor for this base and this class,
* return false such that the subsequent subtables have a chance at it. */
if (unlikely (!found)) return_trace (false);
@@ -82,10 +82,10 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
| hb_map (hb_second)
;
bool ret = false;
unsigned new_length = 0;
for (const auto& mark_record : mark_iter) {
if (unlikely (!mark_record.subset (c, this, klass_mapping)))
return_trace (false);
ret |= mark_record.subset (c, this, klass_mapping);
new_length++;
}
@@ -93,7 +93,7 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
HB_SERIALIZE_ERROR_ARRAY_OVERFLOW)))
return_trace (false);
return_trace (true);
return_trace (ret);
}
};

View File

@@ -197,9 +197,10 @@ struct MarkBasePosFormat1_2
if (!out->markCoverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping);
if (unlikely (!out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping)))
return_trace (false);
unsigned basecount = (this+baseArray).rows;
auto base_iter =
@@ -228,11 +229,9 @@ struct MarkBasePosFormat1_2
;
}
out->baseArray.serialize_subset (c, baseArray, this,
base_iter.len (),
base_indexes.iter ());
return_trace (true);
return_trace (out->baseArray.serialize_subset (c, baseArray, this,
base_iter.len (),
base_indexes.iter ()));
}
};

View File

@@ -169,7 +169,7 @@ struct MarkLigPosFormat1_2
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
const hb_map_t &glyph_map = c->plan->glyph_map_gsub;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
@@ -195,23 +195,24 @@ struct MarkLigPosFormat1_2
if (!out->markCoverage.serialize_serialize (c->serializer, new_mark_coverage))
return_trace (false);
out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping);
if (unlikely (!out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping)))
return_trace (false);
auto new_ligature_coverage =
+ hb_iter (this + ligatureCoverage)
| hb_filter (glyphset)
| hb_take ((this + ligatureArray).len)
| hb_map_retains_sorting (glyph_map)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
if (!out->ligatureCoverage.serialize_serialize (c->serializer, new_ligature_coverage))
return_trace (false);
out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage), classCount, &klass_mapping);
return_trace (true);
return_trace (out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage),
classCount, &klass_mapping));
}
};

@@ -100,16 +100,16 @@ struct MarkMarkPosFormat1_2
/* now we search backwards for a suitable mark glyph until a non-mark glyph */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
skippy_iter.set_lookup_props (c->lookup_props & ~(uint32_t)LookupFlag::IgnoreFlags);
unsigned unsafe_from;
if (!skippy_iter.prev (&unsafe_from))
if (unlikely (!skippy_iter.prev (&unsafe_from)))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
if (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx]))
if (likely (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx])))
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
@@ -183,9 +183,10 @@ struct MarkMarkPosFormat1_2
if (!out->mark1Coverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
out->mark1Array.serialize_subset (c, mark1Array, this,
(this+mark1Coverage).iter (),
&klass_mapping);
if (unlikely (!out->mark1Array.serialize_subset (c, mark1Array, this,
(this+mark1Coverage).iter (),
&klass_mapping)))
return_trace (false);
unsigned mark2count = (this+mark2Array).rows;
auto mark2_iter =
@@ -214,9 +215,10 @@ struct MarkMarkPosFormat1_2
;
}
out->mark2Array.serialize_subset (c, mark2Array, this, mark2_iter.len (), mark2_indexes.iter ());
return_trace (out->mark2Array.serialize_subset (c, mark2Array, this,
mark2_iter.len (),
mark2_indexes.iter ()));
return_trace (true);
}
};

@@ -24,17 +24,16 @@ struct MarkRecord
return_trace (c->check_struct (this) && markAnchor.sanitize (c, base));
}
MarkRecord *subset (hb_subset_context_t *c,
const void *src_base,
const hb_map_t *klass_mapping) const
bool subset (hb_subset_context_t *c,
const void *src_base,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (nullptr);
if (unlikely (!out)) return_trace (false);
out->klass = klass_mapping->get (klass);
out->markAnchor.serialize_subset (c, markAnchor, src_base);
return_trace (out);
return_trace (out->markAnchor.serialize_subset (c, markAnchor, src_base));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,

@@ -110,9 +110,9 @@ struct PairPosFormat1_3
if (likely (index == NOT_COVERED)) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (!skippy_iter.next (&unsafe_to))
if (unlikely (!skippy_iter.next (&unsafe_to)))
{
buffer->unsafe_to_concat (buffer->idx, unsafe_to);
return_trace (false);

@@ -50,13 +50,13 @@ struct PairPosFormat2_4
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int stride = HBUINT16::static_size * (len1 + len2);
unsigned int record_size = valueFormat1.get_size () + valueFormat2.get_size ();
unsigned int count = (unsigned int) class1Count * (unsigned int) class2Count;
return_trace (c->check_range ((const void *) values,
count,
record_size) &&
valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) &&
valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride));
stride) &&
(c->lazy_some_gpos ||
(valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) &&
valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride))));
}
bool intersects (const hb_set_t *glyphs) const
@@ -131,40 +131,46 @@ struct PairPosFormat2_4
if (likely (index == NOT_COVERED)) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (buffer->idx, 1);
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (!skippy_iter.next (&unsafe_to))
if (unlikely (!skippy_iter.next (&unsafe_to)))
{
buffer->unsafe_to_concat (buffer->idx, unsafe_to);
return_trace (false);
}
unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint);
if (!klass2)
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint);
if (unlikely (klass1 >= class1Count || klass2 >= class2Count))
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int record_len = len1 + len2;
unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint);
unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint);
if (unlikely (klass1 >= class1Count || klass2 >= class2Count))
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
const Value *v = &values[record_len * (klass1 * class2Count + klass2)];
bool applied_first = false, applied_second = false;
/* Isolate simple kerning and apply it half to each side.
* Results in better cursor positinoing / underline drawing.
* Results in better cursor positioning / underline drawing.
*
* Disabled, because causes issues... :-(
* https://github.com/harfbuzz/harfbuzz/issues/3408
* https://github.com/harfbuzz/harfbuzz/pull/3235#issuecomment-1029814978
*/
#ifndef HB_SPLIT_KERN
if (0)
if (false)
#endif
{
if (!len2)
@@ -224,8 +230,8 @@ struct PairPosFormat2_4
c->buffer->idx, skippy_iter.idx);
}
applied_first = valueFormat1.apply_value (c, this, v, buffer->cur_pos());
applied_second = valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]);
applied_first = len1 && valueFormat1.apply_value (c, this, v, buffer->cur_pos());
applied_second = len2 && valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
@@ -293,11 +299,13 @@ struct PairPosFormat2_4
out->valueFormat2 = out->valueFormat2.drop_device_table_flags ();
}
unsigned total_len = len1 + len2;
hb_vector_t<unsigned> class2_idxs (+ hb_range ((unsigned) class2Count) | hb_filter (klass2_map));
for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map))
{
for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map))
for (unsigned class2_idx : class2_idxs)
{
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2);
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * total_len;
valueFormat1.copy_values (c->serializer, out->valueFormat1, this, &values[idx], &c->plan->layout_variation_idx_delta_map);
valueFormat2.copy_values (c->serializer, out->valueFormat2, this, &values[idx + len1], &c->plan->layout_variation_idx_delta_map);
}
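
The reordered apply path above still ends at the same lookup: PairPosFormat2 stores a class1Count x class2Count matrix of value records, each record holding len1 values for the first glyph and len2 for the second. A sketch of the indexing with simplified types; as in the hunk, the caller is assumed to have already range-checked klass1 and klass2:

#include <cstdint>
#include <vector>

/* Sketch of the record lookup above; values is the flattened class-pair
 * matrix, and the result points at the first of len1 + len2 values. */
static const uint16_t *pair_value_record (const std::vector<uint16_t> &values,
                                          unsigned klass1, unsigned klass2,
                                          unsigned class2Count,
                                          unsigned len1, unsigned len2)
{
  unsigned record_len = len1 + len2;
  return &values[record_len * (klass1 * class2Count + klass2)];
}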

@@ -52,8 +52,9 @@ struct PairSet
unsigned int count = len;
const PairValueRecord *record = &firstPairValueRecord;
return_trace (closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride));
return_trace (c->lazy_some_gpos ||
(closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride)));
}
bool intersects (const hb_set_t *glyphs,
@@ -120,8 +121,8 @@ struct PairSet
c->buffer->idx, pos);
}
bool applied_first = valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
bool applied_first = len1 && valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = len2 && valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())

@@ -22,7 +22,7 @@ struct PairValueRecord
ValueRecord values; /* Positioning data for the first glyph
* followed by for second glyph */
public:
DEFINE_SIZE_ARRAY (Types::size, values);
DEFINE_SIZE_ARRAY (Types::HBGlyphID::static_size, values);
int cmp (hb_codepoint_t k) const
{ return secondGlyph.cmp (k); }

@@ -90,6 +90,7 @@ struct SinglePosFormat1
bool
position_single (hb_font_t *font,
hb_blob_t *table_blob,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
@@ -100,7 +101,7 @@ struct SinglePosFormat1
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer);
OT::hb_ot_apply_context_t c (1, font, &buffer, table_blob);
valueFormat.apply_value (&c, this, values, pos);
return true;

@@ -94,6 +94,7 @@ struct SinglePosFormat2
bool
position_single (hb_font_t *font,
hb_blob_t *table_blob,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
@@ -105,7 +106,7 @@ struct SinglePosFormat2
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer);
OT::hb_ot_apply_context_t c (1, font, &buffer, table_blob);
valueFormat.apply_value (&c, this,
&values[index * valueFormat.get_len ()],

@@ -118,21 +118,25 @@ struct ValueFormat : HBUINT16
auto *cache = c->var_store_cache;
/* pixel -> fractional pixel */
if (format & xPlaDevice) {
if (use_x_device) glyph_pos.x_offset += (base + get_device (values, &ret)).get_x_delta (font, store, cache);
if (format & xPlaDevice)
{
if (use_x_device) glyph_pos.x_offset += get_device (values, &ret, base, c->sanitizer).get_x_delta (font, store, cache);
values++;
}
if (format & yPlaDevice) {
if (use_y_device) glyph_pos.y_offset += (base + get_device (values, &ret)).get_y_delta (font, store, cache);
if (format & yPlaDevice)
{
if (use_y_device) glyph_pos.y_offset += get_device (values, &ret, base, c->sanitizer).get_y_delta (font, store, cache);
values++;
}
if (format & xAdvDevice) {
if (horizontal && use_x_device) glyph_pos.x_advance += (base + get_device (values, &ret)).get_x_delta (font, store, cache);
if (format & xAdvDevice)
{
if (horizontal && use_x_device) glyph_pos.x_advance += get_device (values, &ret, base, c->sanitizer).get_x_delta (font, store, cache);
values++;
}
if (format & yAdvDevice) {
if (format & yAdvDevice)
{
/* y_advance values grow downward but font-space grows upward, hence negation */
if (!horizontal && use_y_device) glyph_pos.y_advance -= (base + get_device (values, &ret)).get_y_delta (font, store, cache);
if (!horizontal && use_y_device) glyph_pos.y_advance -= get_device (values, &ret, base, c->sanitizer).get_y_delta (font, store, cache);
values++;
}
return ret;
@@ -174,6 +178,9 @@ struct ValueFormat : HBUINT16
if (format & xAdvance) x_adv = copy_value (c, new_format, xAdvance, *values++);
if (format & yAdvance) y_adv = copy_value (c, new_format, yAdvance, *values++);
if (!has_device ())
return;
if (format & xPlaDevice)
{
add_delta_to_value (x_placement, base, values, layout_variation_idx_delta_map);
@@ -233,14 +240,12 @@ struct ValueFormat : HBUINT16
if (format & ValueFormat::xAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::yAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
@@ -277,10 +282,22 @@ struct ValueFormat : HBUINT16
{
return *static_cast<Offset16To<Device> *> (value);
}
static inline const Offset16To<Device>& get_device (const Value* value, bool *worked=nullptr)
static inline const Offset16To<Device>& get_device (const Value* value)
{
return *static_cast<const Offset16To<Device> *> (value);
}
static inline const Device& get_device (const Value* value,
bool *worked,
const void *base,
hb_sanitize_context_t &c)
{
if (worked) *worked |= bool (*value);
return *static_cast<const Offset16To<Device> *> (value);
auto &offset = *static_cast<const Offset16To<Device> *> (value);
if (unlikely (!offset.sanitize (&c, base)))
return Null(Device);
return base + offset;
}
void add_delta_to_value (HBINT16 *value,
@@ -340,25 +357,26 @@ struct ValueFormat : HBUINT16
bool sanitize_value (hb_sanitize_context_t *c, const void *base, const Value *values) const
{
TRACE_SANITIZE (this);
return_trace (c->check_range (values, get_size ()) && (!has_device () || sanitize_value_devices (c, base, values)));
if (unlikely (!c->check_range (values, get_size ()))) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
return_trace (!has_device () || sanitize_value_devices (c, base, values));
}
bool sanitize_values (hb_sanitize_context_t *c, const void *base, const Value *values, unsigned int count) const
{
TRACE_SANITIZE (this);
unsigned int len = get_len ();
unsigned size = get_size ();
if (!c->check_range (values, count, get_size ())) return_trace (false);
if (!c->check_range (values, count, size)) return_trace (false);
if (!has_device ()) return_trace (true);
if (c->lazy_some_gpos)
return_trace (true);
for (unsigned int i = 0; i < count; i++) {
if (!sanitize_value_devices (c, base, values))
return_trace (false);
values += len;
}
return_trace (true);
return_trace (sanitize_values_stride_unsafe (c, base, values, count, size));
}
/* Just sanitize referenced Device tables. Doesn't check the values themselves. */
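
The lazy_some_gpos hunks above defer device-table validation from load time to apply time, so get_device now takes the sanitizer and degrades to a null device when the offset fails its check. A heavily simplified sketch of that degrade-don't-crash shape, using an index bound in place of the real offset sanitization:

#include <cstddef>

/* Stand-in for HarfBuzz's Device; the real get_device sanitizes an
 * Offset16To<Device> against the subtable and returns Null(Device) on
 * failure. Here a plain bounds check plays that role. */
struct device_t { int delta; };

static const device_t &get_device_checked (const device_t *table,
                                           size_t count, size_t index)
{
  static const device_t null_device = { 0 };
  if (index >= count) return null_device;  /* failed check: harmless fallback */
  return table[index];
}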

@@ -8,8 +8,6 @@ namespace OT {
namespace Layout {
namespace GSUB_impl {
typedef hb_pair_t<hb_codepoint_t, hb_codepoint_t> hb_codepoint_pair_t;
template<typename Iterator>
static void SingleSubst_serialize (hb_serialize_context_t *c,
Iterator it);

@@ -10,10 +10,10 @@ namespace GSUB_impl {
template <typename Types>
struct Ligature
{
protected:
public:
typename Types::HBGlyphID
ligGlyph; /* GlyphID of ligature to substitute */
HeadlessArrayOf<typename Types::HBGlyphID>
HeadlessArray16Of<typename Types::HBGlyphID>
component; /* Array of component GlyphIDs--start
* with the second component--ordered
* in writing direction */

View File

@@ -75,12 +75,69 @@ struct LigatureSet
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int num_ligs = ligature.len;
#ifndef HB_NO_OT_RULESETS_FAST_PATH
if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 4)
#endif
{
slow:
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature.arrayZ[i];
if (lig.apply (c)) return_trace (true);
}
return_trace (false);
}
/* This version is optimized for speed by matching the first component
* of the ligature here, instead of calling into the ligation code.
*
* This is replicated in ChainRuleSet and RuleSet. */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
skippy_iter.reset (c->buffer->idx);
skippy_iter.set_match_func (match_always, nullptr);
skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
unsigned unsafe_to;
hb_codepoint_t first = (unsigned) -1;
bool matched = skippy_iter.next (&unsafe_to);
if (likely (matched))
{
first = c->buffer->info[skippy_iter.idx].codepoint;
unsafe_to = skippy_iter.idx + 1;
if (skippy_iter.may_skip (c->buffer->info[skippy_iter.idx]))
{
/* Can't use the fast path if eg. the next char is a default-ignorable
* or other skippable. */
goto slow;
}
}
else
goto slow;
bool unsafe_to_concat = false;
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature[i];
if (lig.apply (c)) return_trace (true);
const auto &lig = this+ligature.arrayZ[i];
if (unlikely (lig.component.lenP1 <= 1) ||
lig.component.arrayZ[0] == first)
{
if (lig.apply (c))
{
if (unsafe_to_concat)
c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
return_trace (true);
}
}
else if (likely (lig.component.lenP1 > 1))
unsafe_to_concat = true;
}
if (likely (unsafe_to_concat))
c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
return_trace (false);
}
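
The fast path added above peeks at the next input glyph once and only descends into ligatures whose stored first component matches it; since the component array starts at the ligature's second glyph, its first entry is exactly that match. A simplified sketch of the prefilter, with stand-in types and a stubbed apply:

#include <vector>

/* Stand-ins for Ligature/LigatureSet; the point is that most candidate
 * ligatures are rejected on a single integer compare. */
struct lig_t
{
  std::vector<unsigned> components;   /* starts at the *second* glyph */
  bool apply () const { return false; }
};

static bool apply_ligature_set (const std::vector<lig_t> &ligs,
                                unsigned next_glyph)
{
  for (const auto &lig : ligs)
    if (lig.components.empty () ||        /* single-glyph lig: always try */
        lig.components[0] == next_glyph)  /* first component matches */
      if (lig.apply ())
        return true;
  return false;
}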

@@ -191,7 +191,6 @@ struct ReverseChainSingleSubstFormat1
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->check_success (out))) return_trace (false);
if (unlikely (!c->serializer->embed (this->format))) return_trace (false);
if (unlikely (!c->serializer->embed (this->coverage))) return_trace (false);

@@ -53,7 +53,7 @@ struct Sequence
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (multiple subtitution)",
"replaced glyph at %u (multiple substitution)",
c->buffer->idx - 1u);
}

@@ -57,7 +57,7 @@ struct SingleSubst
#ifndef HB_NO_BEYOND_64K
if (+ glyphs
| hb_map_retains_sorting (hb_first)
| hb_map_retains_sorting (hb_second)
| hb_filter ([] (hb_codepoint_t gid) { return gid > 0xFFFFu; }))
{
format += 2;

@@ -87,19 +87,54 @@ struct CompositeGlyphRecord
}
}
void transform_points (contour_point_vector_t &points,
static void transform (const float (&matrix)[4],
hb_array_t<contour_point_t> points)
{
if (matrix[0] != 1.f || matrix[1] != 0.f ||
matrix[2] != 0.f || matrix[3] != 1.f)
for (auto &point : points)
point.transform (matrix);
}
static void translate (const contour_point_t &trans,
hb_array_t<contour_point_t> points)
{
if (HB_OPTIMIZE_SIZE_VAL)
{
if (trans.x != 0.f || trans.y != 0.f)
for (auto &point : points)
point.translate (trans);
}
else
{
if (trans.x != 0.f && trans.y != 0.f)
for (auto &point : points)
point.translate (trans);
else
{
if (trans.x != 0.f)
for (auto &point : points)
point.x += trans.x;
else if (trans.y != 0.f)
for (auto &point : points)
point.y += trans.y;
}
}
}
void transform_points (hb_array_t<contour_point_t> points,
const float (&matrix)[4],
const contour_point_t &trans) const
{
if (scaled_offsets ())
{
points.translate (trans);
points.transform (matrix);
translate (trans, points);
transform (matrix, points);
}
else
{
points.transform (matrix);
points.translate (trans);
transform (matrix, points);
translate (trans, points);
}
}
@@ -108,8 +143,8 @@ struct CompositeGlyphRecord
float matrix[4];
contour_point_t trans;
get_transformation (matrix, trans);
if (unlikely (!points.resize (points.length + 1))) return false;
points[points.length - 1] = trans;
if (unlikely (!points.alloc (points.length + 4))) return false; // For phantom points
points.push (trans);
return true;
}
@@ -358,7 +393,7 @@ struct CompositeGlyph
{
/* last 4 points in points_with_deltas are phantom points and should not be included */
if (i >= points_with_deltas.length - 4) {
free (o);
hb_free (o);
return false;
}
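
transform_points above encodes the TrueType SCALED_COMPONENT_OFFSET rule: with scaled offsets the translation is applied before the 2x2 matrix (so the component offset is scaled along with the outline), otherwise after. A single-point sketch, assuming the fonttools-style {xx, xy, yx, yy} matrix layout:

#include <array>

struct point_t { float x, y; };

/* Sketch of the ordering rule only; HarfBuzz applies this over whole point
 * arrays and skips identity matrices / zero translations, as above. */
static point_t place_component_point (point_t p,
                                      const std::array<float, 4> &m,
                                      point_t trans, bool scaled_offsets)
{
  auto translate = [&] (point_t q) { return point_t { q.x + trans.x, q.y + trans.y }; };
  auto transform = [&] (point_t q) {
    return point_t { q.x * m[0] + q.y * m[2], q.x * m[1] + q.y * m[3] };
  };
  return scaled_offsets ? transform (translate (p))   /* offset scales too */
                        : translate (transform (p));
}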

@@ -103,6 +103,63 @@ struct Glyph
}
}
bool get_all_points_without_var (const hb_face_t *face,
contour_point_vector_t &points /* OUT */) const
{
switch (type) {
case SIMPLE:
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points)))
return false;
break;
case COMPOSITE:
{
for (auto &item : get_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
#ifndef HB_NO_VAR_COMPOSITES
case VAR_COMPOSITE:
{
for (auto &item : get_var_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
#endif
case EMPTY:
break;
}
/* Init phantom points */
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
int lsb = 0;
int h_delta = face->table.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb) ?
(int) header->xMin - lsb : 0;
HB_UNUSED int tsb = 0;
int v_orig = (int) header->yMax +
#ifndef HB_NO_VERTICAL
((void) face->table.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb), tsb)
#else
0
#endif
;
unsigned h_adv = face->table.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
face->table.vmtx->get_advance_without_var_unscaled (gid)
#else
- face->get_upem ()
#endif
;
phantoms[PHANTOM_LEFT].x = h_delta;
phantoms[PHANTOM_RIGHT].x = (int) h_adv + h_delta;
phantoms[PHANTOM_TOP].y = v_orig;
phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv;
}
return true;
}
void update_mtx (const hb_subset_plan_t *plan,
int xMin, int xMax,
int yMin, int yMax,
@@ -114,8 +171,8 @@ struct Glyph
if (type != EMPTY)
{
plan->bounds_width_map.set (new_gid, xMax - xMin);
plan->bounds_height_map.set (new_gid, yMax - yMin);
plan->bounds_width_vec[new_gid] = xMax - xMin;
plan->bounds_height_vec[new_gid] = yMax - yMin;
}
unsigned len = all_points.length;
@@ -124,10 +181,12 @@ struct Glyph
float topSideY = all_points[len - 2].y;
float bottomSideY = all_points[len - 1].y;
uint32_t hash = hb_hash (new_gid);
signed hori_aw = roundf (rightSideX - leftSideX);
if (hori_aw < 0) hori_aw = 0;
int lsb = roundf (xMin - leftSideX);
plan->hmtx_map.set (new_gid, hb_pair ((unsigned) hori_aw, lsb));
plan->hmtx_map.set_with_hash (new_gid, hash, hb_pair ((unsigned) hori_aw, lsb));
//flag value should be computed using non-empty glyphs
if (type != EMPTY && lsb != xMin)
plan->head_maxp_info.allXMinIsLsb = false;
@@ -135,7 +194,7 @@ struct Glyph
signed vert_aw = roundf (topSideY - bottomSideY);
if (vert_aw < 0) vert_aw = 0;
int tsb = roundf (topSideY - yMax);
plan->vmtx_map.set (new_gid, hb_pair ((unsigned) vert_aw, tsb));
plan->vmtx_map.set_with_hash (new_gid, hash, hb_pair ((unsigned) vert_aw, tsb));
}
bool compile_header_bytes (const hb_subset_plan_t *plan,
@@ -155,24 +214,28 @@ struct Glyph
{
xMin = xMax = all_points[0].x;
yMin = yMax = all_points[0].y;
unsigned count = all_points.length - 4;
for (unsigned i = 1; i < count; i++)
{
float x = all_points[i].x;
float y = all_points[i].y;
xMin = hb_min (xMin, x);
xMax = hb_max (xMax, x);
yMin = hb_min (yMin, y);
yMax = hb_max (yMax, y);
}
}
for (unsigned i = 1; i < all_points.length - 4; i++)
{
float x = all_points[i].x;
float y = all_points[i].y;
xMin = hb_min (xMin, x);
xMax = hb_max (xMax, x);
yMin = hb_min (yMin, y);
yMax = hb_max (yMax, y);
}
update_mtx (plan, roundf (xMin), roundf (xMax), roundf (yMin), roundf (yMax), all_points);
// These are destined for storage in a 16 bit field to clamp the values to
// fit into a 16 bit signed integer.
int rounded_xMin = hb_clamp (roundf (xMin), -32768.0f, 32767.0f);
int rounded_xMax = hb_clamp (roundf (xMax), -32768.0f, 32767.0f);
int rounded_yMin = hb_clamp (roundf (yMin), -32768.0f, 32767.0f);
int rounded_yMax = hb_clamp (roundf (yMax), -32768.0f, 32767.0f);
int rounded_xMin = roundf (xMin);
int rounded_xMax = roundf (xMax);
int rounded_yMin = roundf (yMin);
int rounded_yMax = roundf (yMax);
update_mtx (plan, rounded_xMin, rounded_xMax, rounded_yMin, rounded_yMax, all_points);
if (type != EMPTY)
{
@@ -287,6 +350,7 @@ struct Glyph
bool use_my_metrics = true,
bool phantom_only = false,
hb_array_t<int> coords = hb_array_t<int> (),
hb_map_t *current_glyphs = nullptr,
unsigned int depth = 0,
unsigned *edge_count = nullptr) const
{
@@ -296,6 +360,10 @@ struct Glyph
if (unlikely (*edge_count > HB_GLYF_MAX_EDGE_COUNT)) return false;
(*edge_count)++;
hb_map_t current_glyphs_stack;
if (current_glyphs == nullptr)
current_glyphs = &current_glyphs_stack;
if (head_maxp_info)
{
head_maxp_info->maxComponentDepth = hb_max (head_maxp_info->maxComponentDepth, depth);
@@ -305,9 +373,8 @@ struct Glyph
coords = hb_array (font->coords, font->num_coords);
contour_point_vector_t stack_points;
bool inplace = type == SIMPLE && all_points.length == 0;
/* Load into all_points if it's empty, as an optimization. */
contour_point_vector_t &points = inplace ? all_points : stack_points;
contour_point_vector_t &points = type == SIMPLE ? all_points : stack_points;
unsigned old_length = points.length;
switch (type) {
case SIMPLE:
@@ -315,7 +382,7 @@ struct Glyph
head_maxp_info->maxContours = hb_max (head_maxp_info->maxContours, (unsigned) header->numberOfContours);
if (depth > 0 && composite_contours)
*composite_contours += (unsigned) header->numberOfContours;
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points, phantom_only)))
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (all_points, phantom_only)))
return false;
break;
case COMPOSITE:
@@ -329,6 +396,7 @@ struct Glyph
{
for (auto &item : get_var_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
#endif
case EMPTY:
@@ -365,9 +433,11 @@ struct Glyph
}
#ifndef HB_NO_VAR
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ());
if (coords)
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
phantom_only && type == SIMPLE);
#endif
// mainly used by CompositeGlyph calculating new X/Y offset value so no need to extend it
@@ -375,27 +445,33 @@ struct Glyph
if (points_with_deltas != nullptr && depth == 0 && type == COMPOSITE)
{
if (unlikely (!points_with_deltas->resize (points.length))) return false;
points_with_deltas->copy_vector (points);
*points_with_deltas = points;
}
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, points.length - 4);
if (!inplace)
all_points.extend (points.as_array ());
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, all_points.length - old_length - 4);
break;
case COMPOSITE:
{
contour_point_vector_t comp_points;
unsigned int comp_index = 0;
for (auto &item : get_composite_iterator ())
{
comp_points.reset ();
if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_gid ())
hb_codepoint_t item_gid = item.get_gid ();
if (unlikely (current_glyphs->has (item_gid)))
continue;
current_glyphs->add (item_gid);
unsigned old_count = all_points.length;
if (unlikely ((!phantom_only || (use_my_metrics && item.is_use_my_metrics ())) &&
!glyf_accelerator.glyph_for_gid (item_gid)
.get_points (font,
glyf_accelerator,
comp_points,
all_points,
points_with_deltas,
head_maxp_info,
composite_contours,
@@ -403,23 +479,32 @@ struct Glyph
use_my_metrics,
phantom_only,
coords,
current_glyphs,
depth + 1,
edge_count)))
{
current_glyphs->del (item_gid);
return false;
}
auto comp_points = all_points.as_array ().sub_array (old_count);
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
float matrix[4];
contour_point_t default_trans;
item.get_transformation (matrix, default_trans);
if (comp_points) // Empty in case of phantom_only
{
float matrix[4];
contour_point_t default_trans;
item.get_transformation (matrix, default_trans);
/* Apply component transformation & translation (with deltas applied) */
item.transform_points (comp_points, matrix, points[comp_index]);
/* Apply component transformation & translation (with deltas applied) */
item.transform_points (comp_points, matrix, points[comp_index]);
}
if (item.is_anchored ())
if (item.is_anchored () && !phantom_only)
{
unsigned int p1, p2;
item.get_anchor_points (p1, p2);
@@ -429,16 +514,20 @@ struct Glyph
delta.init (all_points[p1].x - comp_points[p2].x,
all_points[p1].y - comp_points[p2].y);
comp_points.translate (delta);
item.translate (delta, comp_points);
}
}
all_points.extend (comp_points.as_array ().sub_array (0, comp_points.length - PHANTOM_COUNT));
all_points.resize (all_points.length - PHANTOM_COUNT);
if (all_points.length > HB_GLYF_MAX_POINTS)
{
current_glyphs->del (item_gid);
return false;
}
comp_index++;
current_glyphs->del (item_gid);
}
if (head_maxp_info && depth == 0)
@@ -453,26 +542,37 @@ struct Glyph
#ifndef HB_NO_VAR_COMPOSITES
case VAR_COMPOSITE:
{
contour_point_vector_t comp_points;
hb_array_t<contour_point_t> points_left = points.as_array ();
for (auto &item : get_var_composite_iterator ())
{
hb_codepoint_t item_gid = item.get_gid ();
if (unlikely (current_glyphs->has (item_gid)))
continue;
current_glyphs->add (item_gid);
unsigned item_num_points = item.get_num_points ();
hb_array_t<contour_point_t> record_points = points_left.sub_array (0, item_num_points);
comp_points.reset ();
assert (record_points.length == item_num_points);
auto component_coords = coords;
if (item.is_reset_unspecified_axes ())
/* Copying coords is expensive; so we have put an arbitrary
* limit on the max number of coords for now. */
if (item.is_reset_unspecified_axes () ||
coords.length > HB_GLYF_VAR_COMPOSITE_MAX_AXES)
component_coords = hb_array<int> ();
coord_setter_t coord_setter (component_coords);
item.set_variations (coord_setter, record_points);
if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_gid ())
unsigned old_count = all_points.length;
if (unlikely ((!phantom_only || (use_my_metrics && item.is_use_my_metrics ())) &&
!glyf_accelerator.glyph_for_gid (item_gid)
.get_points (font,
glyf_accelerator,
comp_points,
all_points,
points_with_deltas,
head_maxp_info,
nullptr,
@@ -480,24 +580,36 @@ struct Glyph
use_my_metrics,
phantom_only,
coord_setter.get_coords (),
current_glyphs,
depth + 1,
edge_count)))
{
current_glyphs->del (item_gid);
return false;
}
auto comp_points = all_points.as_array ().sub_array (old_count);
/* Apply component transformation */
item.transform_points (record_points, comp_points);
if (comp_points) // Empty in case of phantom_only
item.transform_points (record_points, comp_points);
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
all_points.extend (comp_points.as_array ().sub_array (0, comp_points.length - PHANTOM_COUNT));
all_points.resize (all_points.length - PHANTOM_COUNT);
if (all_points.length > HB_GLYF_MAX_POINTS)
{
current_glyphs->del (item_gid);
return false;
}
points_left += item_num_points;
current_glyphs->del (item_gid);
}
all_points.extend (phantoms);
} break;
@@ -512,9 +624,10 @@ struct Glyph
/* Undocumented rasterizer behavior:
* Shift points horizontally by the updated left side bearing
*/
contour_point_t delta;
delta.init (-phantoms[PHANTOM_LEFT].x, 0.f);
if (delta.x) all_points.translate (delta);
int v = -phantoms[PHANTOM_LEFT].x;
if (v)
for (auto &point : all_points)
point.x += v;
}
return !all_points.in_error ();
@@ -545,10 +658,11 @@ struct Glyph
int num_contours = header->numberOfContours;
if (unlikely (num_contours == 0)) type = EMPTY;
else if (num_contours > 0) type = SIMPLE;
else if (num_contours == -1) type = COMPOSITE;
#ifndef HB_NO_VAR_COMPOSITES
else if (num_contours == -2) type = VAR_COMPOSITE;
#endif
else type = COMPOSITE; /* negative numbers */
else type = EMPTY; // Spec deviation; Spec says COMPOSITE, but not seen in the wild.
}
protected:
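
get_all_points_without_var above appends the four phantom points after the real contours: two horizontal-metrics origins derived from hmtx and two vertical ones from vmtx (or the upem fallback). A sketch of just that initialization, in unscaled font units, mirroring the values the hunk pulls from head/hmtx/vmtx:

struct phantoms_t { int left_x, right_x, top_y, bottom_y; };

static phantoms_t init_phantoms (int x_min, int y_max, int lsb, int tsb,
                                 unsigned h_adv, unsigned v_adv)
{
  int h_delta = x_min - lsb;   /* shift so the left side bearing is honored */
  int v_orig  = y_max + tsb;   /* vertical origin sits tsb above yMax */
  return { h_delta,
           (int) h_adv + h_delta,
           v_orig,
           v_orig - (int) v_adv };
}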

@@ -124,7 +124,7 @@ struct SimpleGlyph
}
static bool read_flags (const HBUINT8 *&p /* IN/OUT */,
contour_point_vector_t &points_ /* IN/OUT */,
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end)
{
unsigned count = points_.length;
@@ -146,7 +146,7 @@ struct SimpleGlyph
}
static bool read_points (const HBUINT8 *&p /* IN/OUT */,
contour_point_vector_t &points_ /* IN/OUT */,
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end,
float contour_point_t::*m,
const simple_glyph_flag_t short_flag,
@@ -154,10 +154,9 @@ struct SimpleGlyph
{
int v = 0;
unsigned count = points_.length;
for (unsigned i = 0; i < count; i++)
for (auto &point : points_)
{
unsigned flag = points_[i].flag;
unsigned flag = point.flag;
if (flag & short_flag)
{
if (unlikely (p + 1 > end)) return false;
@@ -175,23 +174,27 @@ struct SimpleGlyph
p += HBINT16::static_size;
}
}
points_.arrayZ[i].*m = v;
point.*m = v;
}
return true;
}
bool get_contour_points (contour_point_vector_t &points_ /* OUT */,
bool get_contour_points (contour_point_vector_t &points /* OUT */,
bool phantom_only = false) const
{
const HBUINT16 *endPtsOfContours = &StructAfter<HBUINT16> (header);
int num_contours = header.numberOfContours;
assert (num_contours);
assert (num_contours > 0);
/* One extra item at the end, for the instruction-count below. */
if (unlikely (!bytes.check_range (&endPtsOfContours[num_contours]))) return false;
unsigned int num_points = endPtsOfContours[num_contours - 1] + 1;
points_.alloc (num_points + 4, true); // Allocate for phantom points, to avoid a possible copy
if (!points_.resize (num_points)) return false;
unsigned old_length = points.length;
points.alloc (points.length + num_points + 4, true); // Allocate for phantom points, to avoid a possible copy
if (unlikely (!points.resize (points.length + num_points, false))) return false;
auto points_ = points.as_array ().sub_array (old_length);
if (!phantom_only)
hb_memset (points_.arrayZ, 0, sizeof (contour_point_t) * num_points);
if (phantom_only) return true;
for (int i = 0; i < num_contours; i++)
@@ -214,7 +217,7 @@ struct SimpleGlyph
}
static void encode_coord (int value,
uint8_t &flag,
unsigned &flag,
const simple_glyph_flag_t short_flag,
const simple_glyph_flag_t same_flag,
hb_vector_t<uint8_t> &coords /* OUT */)
@@ -239,9 +242,9 @@ struct SimpleGlyph
}
}
static void encode_flag (uint8_t &flag,
uint8_t &repeat,
uint8_t lastflag,
static void encode_flag (unsigned flag,
unsigned &repeat,
unsigned lastflag,
hb_vector_t<uint8_t> &flags /* OUT */)
{
if (flag == lastflag && repeat != 255)
@@ -262,7 +265,7 @@ struct SimpleGlyph
else
{
repeat = 0;
flags.push (flag);
flags.arrayZ[flags.length++] = flag;
}
}
@@ -282,13 +285,13 @@ struct SimpleGlyph
if (unlikely (!x_coords.alloc (2*num_points, true))) return false;
if (unlikely (!y_coords.alloc (2*num_points, true))) return false;
uint8_t lastflag = 255, repeat = 0;
unsigned lastflag = 255, repeat = 0;
int prev_x = 0, prev_y = 0;
for (unsigned i = 0; i < num_points; i++)
{
uint8_t flag = all_points.arrayZ[i].flag;
flag &= FLAG_ON_CURVE + FLAG_OVERLAP_SIMPLE;
unsigned flag = all_points.arrayZ[i].flag;
flag &= FLAG_ON_CURVE | FLAG_OVERLAP_SIMPLE | FLAG_CUBIC;
int cur_x = roundf (all_points.arrayZ[i].x);
int cur_y = roundf (all_points.arrayZ[i].y);
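
The encode_flag changes above belong to compiling simple-glyph flag bytes, which the glyf format run-length encodes: a flag byte with the repeat bit set is followed by a count of additional repetitions. A from-scratch sketch of the same scheme per the TrueType spec (not HarfBuzz's incremental encoder), assuming the input flags don't already carry the repeat bit:

#include <cstdint>
#include <vector>

static void encode_flags_rle (const std::vector<uint8_t> &raw,
                              std::vector<uint8_t> &out)
{
  const uint8_t REPEAT_FLAG = 0x08;   /* glyf simple-glyph repeat bit */
  for (size_t i = 0; i < raw.size (); )
  {
    uint8_t f = raw[i];
    size_t run = 1;
    while (i + run < raw.size () && raw[i + run] == f && run < 256)
      run++;
    if (run > 1)
    {
      out.push_back (f | REPEAT_FLAG);
      out.push_back ((uint8_t) (run - 1));  /* additional repeats, <= 255 */
    }
    else
      out.push_back (f);
    i += run;
  }
}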

@@ -22,7 +22,7 @@ struct SubsetGlyph
bool serialize (hb_serialize_context_t *c,
bool use_short_loca,
const hb_subset_plan_t *plan)
const hb_subset_plan_t *plan) const
{
TRACE_SERIALIZE (this);
@@ -40,7 +40,7 @@ struct SubsetGlyph
pad = 0;
while (pad_length > 0)
{
c->embed (pad);
(void) c->embed (pad);
pad_length--;
}

@@ -36,24 +36,21 @@ struct VarCompositeGlyphRecord
unsigned int get_size () const
{
unsigned fl = flags;
unsigned int size = min_size;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 4 : 3;
unsigned axis_width = (fl & AXIS_INDICES_ARE_SHORT) ? 4 : 3;
size += numAxes * axis_width;
// gid
size += 2;
if (flags & GID_IS_24BIT) size += 1;
if (fl & GID_IS_24BIT) size += 1;
if (flags & HAVE_TRANSLATE_X) size += 2;
if (flags & HAVE_TRANSLATE_Y) size += 2;
if (flags & HAVE_ROTATION) size += 2;
if (flags & HAVE_SCALE_X) size += 2;
if (flags & HAVE_SCALE_Y) size += 2;
if (flags & HAVE_SKEW_X) size += 2;
if (flags & HAVE_SKEW_Y) size += 2;
if (flags & HAVE_TCENTER_X) size += 2;
if (flags & HAVE_TCENTER_Y) size += 2;
// 2 bytes each for the following flags
fl = fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y |
HAVE_ROTATION |
HAVE_SCALE_X | HAVE_SCALE_Y |
HAVE_SKEW_X | HAVE_SKEW_Y |
HAVE_TCENTER_X | HAVE_TCENTER_Y);
size += hb_popcount (fl) * 2;
return size;
}
@@ -66,17 +63,17 @@ struct VarCompositeGlyphRecord
hb_codepoint_t get_gid () const
{
if (flags & GID_IS_24BIT)
return StructAfter<const HBGlyphID24> (numAxes);
return * (const HBGlyphID24 *) &pad;
else
return StructAfter<const HBGlyphID16> (numAxes);
return * (const HBGlyphID16 *) &pad;
}
void set_gid (hb_codepoint_t gid)
{
if (flags & GID_IS_24BIT)
StructAfter<HBGlyphID24> (numAxes) = gid;
* (HBGlyphID24 *) &pad = gid;
else
StructAfter<HBGlyphID16> (numAxes) = gid;
* (HBGlyphID16 *) &pad = gid;
}
unsigned get_numAxes () const
@@ -86,26 +83,44 @@ struct VarCompositeGlyphRecord
unsigned get_num_points () const
{
unsigned fl = flags;
unsigned num = 0;
if (flags & AXES_HAVE_VARIATION) num += numAxes;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y)) num++;
if (flags & HAVE_ROTATION) num++;
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y)) num++;
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y)) num++;
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y)) num++;
if (fl & AXES_HAVE_VARIATION) num += numAxes;
/* Hopefully faster code, relying on the value of the flags. */
fl = (((fl & (HAVE_TRANSLATE_Y | HAVE_SCALE_Y | HAVE_SKEW_Y | HAVE_TCENTER_Y)) >> 1) | fl) &
(HAVE_TRANSLATE_X | HAVE_ROTATION | HAVE_SCALE_X | HAVE_SKEW_X | HAVE_TCENTER_X);
num += hb_popcount (fl);
return num;
/* Slower but more readable code. */
if (fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y)) num++;
if (fl & HAVE_ROTATION) num++;
if (fl & (HAVE_SCALE_X | HAVE_SCALE_Y)) num++;
if (fl & (HAVE_SKEW_X | HAVE_SKEW_Y)) num++;
if (fl & (HAVE_TCENTER_X | HAVE_TCENTER_Y)) num++;
return num;
}
void transform_points (hb_array_t<contour_point_t> record_points,
contour_point_vector_t &points) const
void transform_points (hb_array_t<const contour_point_t> record_points,
hb_array_t<contour_point_t> points) const
{
float matrix[4];
contour_point_t trans;
get_transformation_from_points (record_points, matrix, trans);
get_transformation_from_points (record_points.arrayZ, matrix, trans);
points.transform (matrix);
points.translate (trans);
auto arrayZ = points.arrayZ;
unsigned count = points.length;
if (matrix[0] != 1.f || matrix[1] != 0.f ||
matrix[2] != 0.f || matrix[3] != 1.f)
for (unsigned i = 0; i < count; i++)
arrayZ[i].transform (matrix);
if (trans.x != 0.f || trans.y != 0.f)
for (unsigned i = 0; i < count; i++)
arrayZ[i].translate (trans);
}
static inline void transform (float (&matrix)[4], contour_point_t &trans,
@@ -136,26 +151,41 @@ struct VarCompositeGlyphRecord
static void translate (float (&matrix)[4], contour_point_t &trans,
float translateX, float translateY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L213
float other[6] = {1.f, 0.f, 0.f, 1.f, translateX, translateY};
transform (matrix, trans, other);
if (!translateX && !translateY)
return;
trans.x += matrix[0] * translateX + matrix[2] * translateY;
trans.y += matrix[1] * translateX + matrix[3] * translateY;
}
static void scale (float (&matrix)[4], contour_point_t &trans,
float scaleX, float scaleY)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L224
float other[6] = {scaleX, 0.f, 0.f, scaleY, 0.f, 0.f};
transform (matrix, trans, other);
if (scaleX == 1.f && scaleY == 1.f)
return;
matrix[0] *= scaleX;
matrix[1] *= scaleX;
matrix[2] *= scaleY;
matrix[3] *= scaleY;
}
static void rotate (float (&matrix)[4], contour_point_t &trans,
float rotation)
{
if (!rotation)
return;
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L240
rotation = rotation * HB_PI;
float c = cosf (rotation);
float s = sinf (rotation);
float c;
float s;
#ifdef HAVE_SINCOSF
sincosf (rotation, &s, &c);
#else
c = cosf (rotation);
s = sinf (rotation);
#endif
float other[6] = {c, s, -s, c, 0.f, 0.f};
transform (matrix, trans, other);
}
@@ -163,101 +193,100 @@ struct VarCompositeGlyphRecord
static void skew (float (&matrix)[4], contour_point_t &trans,
float skewX, float skewY)
{
if (!skewX && !skewY)
return;
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L255
skewX = skewX * HB_PI;
skewY = skewY * HB_PI;
float other[6] = {1.f, tanf (skewY), tanf (skewX), 1.f, 0.f, 0.f};
float other[6] = {1.f,
skewY ? tanf (skewY) : 0.f,
skewX ? tanf (skewX) : 0.f,
1.f,
0.f, 0.f};
transform (matrix, trans, other);
}
bool get_points (contour_point_vector_t &points) const
{
float translateX = 0.f;
float translateY = 0.f;
float rotation = 0.f;
float scaleX = 1.f * (1 << 10);
float scaleY = 1.f * (1 << 10);
float skewX = 0.f;
float skewY = 0.f;
float tCenterX = 0.f;
float tCenterY = 0.f;
unsigned num_points = get_num_points ();
if (unlikely (!points.resize (points.length + num_points))) return false;
points.alloc (points.length + num_points + 4); // For phantom points
if (unlikely (!points.resize (points.length + num_points, false))) return false;
contour_point_t *rec_points = points.arrayZ + (points.length - num_points);
hb_memset (rec_points, 0, num_points * sizeof (rec_points[0]));
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned axes_size = numAxes * axis_width;
unsigned fl = flags;
unsigned num_axes = numAxes;
unsigned axis_width = (fl & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned axes_size = num_axes * axis_width;
const F2DOT14 *q = (const F2DOT14 *) (axes_size +
(flags & GID_IS_24BIT ? 3 : 2) +
&StructAfter<const HBUINT8> (numAxes));
(fl & GID_IS_24BIT ? 3 : 2) +
(const HBUINT8 *) &pad);
hb_array_t<contour_point_t> rec_points = points.as_array ().sub_array (points.length - num_points);
unsigned count = numAxes;
if (flags & AXES_HAVE_VARIATION)
unsigned count = num_axes;
if (fl & AXES_HAVE_VARIATION)
{
for (unsigned i = 0; i < count; i++)
rec_points[i].x = q++->to_int ();
rec_points += count;
rec_points++->x = q++->to_int ();
}
else
q += count;
const HBUINT16 *p = (const HBUINT16 *) q;
if (flags & HAVE_TRANSLATE_X) translateX = * (const FWORD *) p++;
if (flags & HAVE_TRANSLATE_Y) translateY = * (const FWORD *) p++;
if (flags & HAVE_ROTATION) rotation = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_SCALE_X) scaleX = ((const F6DOT10 *) p++)->to_int ();
if (flags & HAVE_SCALE_Y) scaleY = ((const F6DOT10 *) p++)->to_int ();
if (flags & HAVE_SKEW_X) skewX = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_SKEW_Y) skewY = ((const F4DOT12 *) p++)->to_int ();
if (flags & HAVE_TCENTER_X) tCenterX = * (const FWORD *) p++;
if (flags & HAVE_TCENTER_Y) tCenterY = * (const FWORD *) p++;
if ((flags & UNIFORM_SCALE) && !(flags & HAVE_SCALE_Y))
scaleY = scaleX;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
if (fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
{
rec_points[0].x = translateX;
rec_points[0].y = translateY;
int translateX = (fl & HAVE_TRANSLATE_X) ? * (const FWORD *) p++ : 0;
int translateY = (fl & HAVE_TRANSLATE_Y) ? * (const FWORD *) p++ : 0;
rec_points->x = translateX;
rec_points->y = translateY;
rec_points++;
}
if (flags & HAVE_ROTATION)
if (fl & HAVE_ROTATION)
{
rec_points[0].x = rotation;
int rotation = (fl & HAVE_ROTATION) ? ((const F4DOT12 *) p++)->to_int () : 0;
rec_points->x = rotation;
rec_points++;
}
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y))
if (fl & (HAVE_SCALE_X | HAVE_SCALE_Y))
{
rec_points[0].x = scaleX;
rec_points[0].y = scaleY;
int scaleX = (fl & HAVE_SCALE_X) ? ((const F6DOT10 *) p++)->to_int () : 1 << 10;
int scaleY = (fl & HAVE_SCALE_Y) ? ((const F6DOT10 *) p++)->to_int () : 1 << 10;
if ((fl & UNIFORM_SCALE) && !(fl & HAVE_SCALE_Y))
scaleY = scaleX;
rec_points->x = scaleX;
rec_points->y = scaleY;
rec_points++;
}
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y))
if (fl & (HAVE_SKEW_X | HAVE_SKEW_Y))
{
rec_points[0].x = skewX;
rec_points[0].y = skewY;
int skewX = (fl & HAVE_SKEW_X) ? ((const F4DOT12 *) p++)->to_int () : 0;
int skewY = (fl & HAVE_SKEW_Y) ? ((const F4DOT12 *) p++)->to_int () : 0;
rec_points->x = skewX;
rec_points->y = skewY;
rec_points++;
}
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
if (fl & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
{
rec_points[0].x = tCenterX;
rec_points[0].y = tCenterY;
int tCenterX = (fl & HAVE_TCENTER_X) ? * (const FWORD *) p++ : 0;
int tCenterY = (fl & HAVE_TCENTER_Y) ? * (const FWORD *) p++ : 0;
rec_points->x = tCenterX;
rec_points->y = tCenterY;
rec_points++;
}
assert (!rec_points);
return true;
}
void get_transformation_from_points (hb_array_t<contour_point_t> rec_points,
void get_transformation_from_points (const contour_point_t *rec_points,
float (&matrix)[4], contour_point_t &trans) const
{
if (flags & AXES_HAVE_VARIATION)
unsigned fl = flags;
if (fl & AXES_HAVE_VARIATION)
rec_points += numAxes;
matrix[0] = matrix[3] = 1.f;
@@ -274,36 +303,35 @@ struct VarCompositeGlyphRecord
float tCenterX = 0.f;
float tCenterY = 0.f;
if (flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
if (fl & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y))
{
translateX = rec_points[0].x;
translateY = rec_points[0].y;
translateX = rec_points->x;
translateY = rec_points->y;
rec_points++;
}
if (flags & HAVE_ROTATION)
if (fl & HAVE_ROTATION)
{
rotation = rec_points[0].x / (1 << 12);
rotation = rec_points->x / (1 << 12);
rec_points++;
}
if (flags & (HAVE_SCALE_X | HAVE_SCALE_Y))
if (fl & (HAVE_SCALE_X | HAVE_SCALE_Y))
{
scaleX = rec_points[0].x / (1 << 10);
scaleY = rec_points[0].y / (1 << 10);
scaleX = rec_points->x / (1 << 10);
scaleY = rec_points->y / (1 << 10);
rec_points++;
}
if (flags & (HAVE_SKEW_X | HAVE_SKEW_Y))
if (fl & (HAVE_SKEW_X | HAVE_SKEW_Y))
{
skewX = rec_points[0].x / (1 << 12);
skewY = rec_points[0].y / (1 << 12);
skewX = rec_points->x / (1 << 12);
skewY = rec_points->y / (1 << 12);
rec_points++;
}
if (flags & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
if (fl & (HAVE_TCENTER_X | HAVE_TCENTER_Y))
{
tCenterX = rec_points[0].x;
tCenterY = rec_points[0].y;
tCenterX = rec_points->x;
tCenterY = rec_points->y;
rec_points++;
}
assert (!rec_points);
translate (matrix, trans, translateX + tCenterX, translateY + tCenterY);
rotate (matrix, trans, rotation);
@@ -317,18 +345,19 @@ struct VarCompositeGlyphRecord
{
bool have_variations = flags & AXES_HAVE_VARIATION;
unsigned axis_width = (flags & AXIS_INDICES_ARE_SHORT) ? 2 : 1;
unsigned num_axes = numAxes;
const HBUINT8 *p = (const HBUINT8 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24BIT ? 3 : 2));
const HBUINT16 *q = (const HBUINT16 *) (((HBUINT8 *) &numAxes) + numAxes.static_size + (flags & GID_IS_24BIT ? 3 : 2));
const F2DOT14 *a = (const F2DOT14 *) ((HBUINT8 *) (axis_width == 1 ? (p + numAxes) : (HBUINT8 *) (q + numAxes)));
const F2DOT14 *a = (const F2DOT14 *) ((HBUINT8 *) (axis_width == 1 ? (p + num_axes) : (HBUINT8 *) (q + num_axes)));
unsigned count = numAxes;
unsigned count = num_axes;
for (unsigned i = 0; i < count; i++)
{
unsigned axis_index = axis_width == 1 ? (unsigned) *p++ : (unsigned) *q++;
signed v = have_variations ? rec_points[i].x : a++->to_int ();
signed v = have_variations ? rec_points.arrayZ[i].x : a++->to_int ();
v = hb_clamp (v, -(1<<14), (1<<14));
setter[axis_index] = v;
@@ -338,8 +367,9 @@ struct VarCompositeGlyphRecord
protected:
HBUINT16 flags;
HBUINT8 numAxes;
HBUINT16 pad;
public:
DEFINE_SIZE_MIN (3);
DEFINE_SIZE_MIN (5);
};
using var_composite_iter_t = composite_iter_tmpl<VarCompositeGlyphRecord>;
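
Both get_size and get_num_points above replace chains of flag tests with a mask-then-popcount: fold the flag word down to the bits that each contribute a fixed-size field, then count set bits. A sketch with illustrative flag values (not the actual VarComposite bit assignments):

#include <bit>
#include <cstdint>

enum : uint32_t
{
  HAVE_TRANSLATE_X = 1u << 0, HAVE_TRANSLATE_Y = 1u << 1,
  HAVE_ROTATION    = 1u << 2,
  HAVE_SCALE_X     = 1u << 3, HAVE_SCALE_Y     = 1u << 4,
};

/* Each present field contributes two bytes, so the size contribution is
 * just 2 * popcount of the masked flags, as in get_size above. */
static unsigned transform_field_bytes (uint32_t flags)
{
  uint32_t fl = flags & (HAVE_TRANSLATE_X | HAVE_TRANSLATE_Y | HAVE_ROTATION |
                         HAVE_SCALE_X | HAVE_SCALE_Y);
  return 2 * std::popcount (fl);
}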

@@ -16,6 +16,8 @@ struct coord_setter_t
int& operator [] (unsigned idx)
{
if (unlikely (idx >= HB_GLYF_VAR_COMPOSITE_MAX_AXES))
return Crap(int);
if (coords.length < idx + 1)
coords.resize (idx + 1);
return coords[idx];
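
The guard added above caps how far the coordinate array may grow; out-of-range axis indices are redirected to a throwaway slot (the Crap(int) idiom) so callers keep a branch-free write path. A sketch with an assumed cap value:

#include <vector>

struct coord_setter_sketch
{
  static const unsigned MAX_AXES = 4096;  /* assumed cap, not HarfBuzz's */
  std::vector<int> coords;
  int scratch = 0;                        /* writes here are deliberately lost */

  int &operator[] (unsigned idx)
  {
    if (idx >= MAX_AXES) return scratch;
    if (coords.size () < idx + 1) coords.resize (idx + 1);
    return coords[idx];
  }
};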

@@ -12,24 +12,44 @@ namespace OT {
namespace glyf_impl {
template<typename IteratorIn, typename IteratorOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int)),
hb_requires (hb_is_sink_of (IteratorOut, unsigned))>
template<typename IteratorIn, typename TypeOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int))>
static void
_write_loca (IteratorIn&& it, bool short_offsets, IteratorOut&& dest)
_write_loca (IteratorIn&& it,
const hb_sorted_vector_t<hb_codepoint_pair_t> new_to_old_gid_list,
bool short_offsets,
TypeOut *dest,
unsigned num_offsets)
{
unsigned right_shift = short_offsets ? 1 : 0;
unsigned int offset = 0;
dest << 0;
+ it
| hb_map ([=, &offset] (unsigned int padded_size)
{
offset += padded_size;
DEBUG_MSG (SUBSET, nullptr, "loca entry offset %u", offset);
return offset >> right_shift;
})
| hb_sink (dest)
;
unsigned offset = 0;
TypeOut value;
value = 0;
*dest++ = value;
hb_codepoint_t last = 0;
for (auto _ : new_to_old_gid_list)
{
hb_codepoint_t gid = _.first;
for (; last < gid; last++)
{
DEBUG_MSG (SUBSET, nullptr, "loca entry empty offset %u", offset);
*dest++ = value;
}
unsigned padded_size = *it++;
offset += padded_size;
DEBUG_MSG (SUBSET, nullptr, "loca entry gid %u offset %u padded-size %u", gid, offset, padded_size);
value = offset >> right_shift;
*dest++ = value;
last++; // Skip over gid
}
unsigned num_glyphs = num_offsets - 1;
for (; last < num_glyphs; last++)
{
DEBUG_MSG (SUBSET, nullptr, "loca entry empty offset %u", offset);
*dest++ = value;
}
}
static bool
@@ -67,11 +87,14 @@ _add_head_and_set_loca_version (hb_subset_plan_t *plan, bool use_short_loca)
template<typename Iterator,
hb_requires (hb_is_source_of (Iterator, unsigned int))>
static bool
_add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_short_loca)
_add_loca_and_head (hb_subset_context_t *c,
Iterator padded_offsets,
bool use_short_loca)
{
unsigned num_offsets = padded_offsets.len () + 1;
unsigned num_offsets = c->plan->num_output_glyphs () + 1;
unsigned entry_size = use_short_loca ? 2 : 4;
char *loca_prime_data = (char *) hb_calloc (entry_size, num_offsets);
char *loca_prime_data = (char *) hb_malloc (entry_size * num_offsets);
if (unlikely (!loca_prime_data)) return false;
@@ -79,9 +102,9 @@ _add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_s
entry_size, num_offsets, entry_size * num_offsets);
if (use_short_loca)
_write_loca (padded_offsets, true, hb_array ((HBUINT16 *) loca_prime_data, num_offsets));
_write_loca (padded_offsets, c->plan->new_to_old_gid_list, true, (HBUINT16 *) loca_prime_data, num_offsets);
else
_write_loca (padded_offsets, false, hb_array ((HBUINT32 *) loca_prime_data, num_offsets));
_write_loca (padded_offsets, c->plan->new_to_old_gid_list, false, (HBUINT32 *) loca_prime_data, num_offsets);
hb_blob_t *loca_blob = hb_blob_create (loca_prime_data,
entry_size * num_offsets,
@@ -89,8 +112,8 @@ _add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_s
loca_prime_data,
hb_free);
bool result = plan->add_table (HB_OT_TAG_loca, loca_blob)
&& _add_head_and_set_loca_version (plan, use_short_loca);
bool result = c->plan->add_table (HB_OT_TAG_loca, loca_blob)
&& _add_head_and_set_loca_version (c->plan, use_short_loca);
hb_blob_destroy (loca_blob);
return result;
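
The rewritten _write_loca above walks the sparse new-to-old gid list, emitting the running offset for mapped glyphs and repeating the previous offset (an empty glyph) for gaps. The format rule it relies on: short loca stores offset/2 in 16 bits, long loca stores the offset directly. A dense-case sketch of that encoding, without the gid-gap handling:

#include <cstdint>
#include <vector>

/* Sketch of the offset encoding only: one leading zero entry, then a
 * running total of padded glyph sizes, halved for short offsets. */
static std::vector<uint32_t> write_loca_dense (const std::vector<uint32_t> &padded_sizes,
                                               bool short_offsets)
{
  unsigned right_shift = short_offsets ? 1 : 0;
  std::vector<uint32_t> loca;
  loca.reserve (padded_sizes.size () + 1);
  uint32_t offset = 0;
  loca.push_back (0);
  for (uint32_t size : padded_sizes)
  {
    offset += size;
    loca.push_back (offset >> right_shift);
  }
  return loca;
}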

@@ -85,75 +85,72 @@ struct glyf
return_trace (false);
}
glyf *glyf_prime = c->serializer->start_embed <glyf> ();
if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false);
hb_font_t *font = nullptr;
if (c->plan->normalized_coords)
{
font = _create_font_for_instancing (c->plan);
if (unlikely (!font)) return false;
if (unlikely (!font))
return_trace (false);
}
hb_vector_t<unsigned> padded_offsets;
unsigned num_glyphs = c->plan->num_output_glyphs ();
if (unlikely (!padded_offsets.resize (num_glyphs)))
{
hb_font_destroy (font);
return false;
}
if (unlikely (!padded_offsets.alloc (c->plan->new_to_old_gid_list.length, true)))
return_trace (false);
hb_vector_t<glyf_impl::SubsetGlyph> glyphs;
if (!_populate_subset_glyphs (c->plan, font, glyphs))
{
hb_font_destroy (font);
return false;
return_trace (false);
}
if (font)
hb_font_destroy (font);
unsigned max_offset = 0;
for (unsigned i = 0; i < num_glyphs; i++)
for (auto &g : glyphs)
{
padded_offsets[i] = glyphs[i].padded_size ();
max_offset += padded_offsets[i];
unsigned size = g.padded_size ();
padded_offsets.push (size);
max_offset += size;
}
bool use_short_loca = false;
if (likely (!c->plan->force_long_loca))
use_short_loca = max_offset < 0x1FFFF;
if (!use_short_loca) {
for (unsigned i = 0; i < num_glyphs; i++)
padded_offsets[i] = glyphs[i].length ();
if (!use_short_loca)
{
padded_offsets.resize (0);
for (auto &g : glyphs)
padded_offsets.push (g.length ());
}
bool result = glyf_prime->serialize (c->serializer, glyphs.writer (), use_short_loca, c->plan);
auto *glyf_prime = c->serializer->start_embed <glyf> ();
bool result = glyf_prime->serialize (c->serializer, hb_iter (glyphs), use_short_loca, c->plan);
if (c->plan->normalized_coords && !c->plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs);
if (!result) return false;
if (unlikely (!c->serializer->check_success (glyf_impl::_add_loca_and_head (c,
padded_offsets.iter (),
use_short_loca))))
return_trace (false);
if (unlikely (c->serializer->in_error ())) return_trace (false);
return_trace (c->serializer->check_success (glyf_impl::_add_loca_and_head (c->plan,
padded_offsets.iter (),
use_short_loca)));
return result;
}
bool
_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_font_t *font,
hb_vector_t<glyf_impl::SubsetGlyph> &glyphs /* OUT */) const;
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const;
hb_font_t *
_create_font_for_instancing (const hb_subset_plan_t *plan) const;
void _free_compiled_subset_glyphs (hb_vector_t<glyf_impl::SubsetGlyph> &glyphs) const
{
for (unsigned i = 0; i < glyphs.length; i++)
glyphs[i].free_compiled_bytes ();
for (auto &g : glyphs)
g.free_compiled_bytes ();
}
protected:
@@ -222,13 +219,14 @@ struct glyf_accelerator_t
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, nullptr, nullptr, nullptr, true, true, phantom_only)))
return false;
unsigned count = all_points.length;
assert (count >= glyf_impl::PHANTOM_COUNT);
count -= glyf_impl::PHANTOM_COUNT;
if (consumer.is_consuming_contour_points ())
{
unsigned count = all_points.length;
assert (count >= glyf_impl::PHANTOM_COUNT);
count -= glyf_impl::PHANTOM_COUNT;
for (unsigned point_index = 0; point_index < count; point_index++)
consumer.consume_point (all_points[point_index]);
for (auto &point : all_points.as_array ().sub_array (0, count))
consumer.consume_point (point);
consumer.points_end ();
}
@@ -236,7 +234,7 @@ struct glyf_accelerator_t
contour_point_t *phantoms = consumer.get_phantoms_sink ();
if (phantoms)
for (unsigned i = 0; i < glyf_impl::PHANTOM_COUNT; ++i)
phantoms[i] = all_points[all_points.length - glyf_impl::PHANTOM_COUNT + i];
phantoms[i] = all_points.arrayZ[count + i];
return true;
}
@@ -299,6 +297,7 @@ struct glyf_accelerator_t
if (extents) bounds = contour_bounds_t ();
}
HB_ALWAYS_INLINE
void consume_point (const contour_point_t &point) { bounds.add (point); }
void points_end () { bounds.get_extents (font, extents, scaled); }
@@ -431,16 +430,17 @@ glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const
{
OT::glyf_accelerator_t glyf (plan->source);
unsigned num_glyphs = plan->num_output_glyphs ();
if (!glyphs.resize (num_glyphs)) return false;
if (!glyphs.alloc (plan->new_to_old_gid_list.length, true)) return false;
for (auto p : plan->glyph_map->iter ())
for (const auto &pair : plan->new_to_old_gid_list)
{
unsigned new_gid = p.second;
glyf_impl::SubsetGlyph& subset_glyph = glyphs.arrayZ[new_gid];
subset_glyph.old_gid = p.first;
hb_codepoint_t new_gid = pair.first;
hb_codepoint_t old_gid = pair.second;
glyf_impl::SubsetGlyph *p = glyphs.push ();
glyf_impl::SubsetGlyph& subset_glyph = *p;
subset_glyph.old_gid = old_gid;
if (unlikely (new_gid == 0 &&
if (unlikely (old_gid == 0 && new_gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)) &&
!plan->normalized_coords)
subset_glyph.source_glyph = glyf_impl::Glyph ();
@@ -487,7 +487,7 @@ glyf::_create_font_for_instancing (const hb_subset_plan_t *plan) const
{
hb_variation_t var;
var.tag = _.first;
var.value = _.second;
var.value = _.second.middle;
vars.push (var);
}
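
The subset path above sums padded glyph sizes to decide the loca format: short offsets only work while the final offset still fits once halved into 16 bits, hence the max_offset < 0x1FFFF test, with force_long_loca overriding it. A sketch of the decision:

#include <vector>

static bool choose_short_loca (const std::vector<unsigned> &padded_sizes,
                               bool force_long_loca)
{
  if (force_long_loca) return false;
  unsigned max_offset = 0;
  for (unsigned size : padded_sizes)
    max_offset += size;
  return max_offset < 0x1FFFFu;   /* same bound as the hunk above */
}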

@@ -21,19 +21,15 @@ struct path_builder_t
operator bool () const { return has_data; }
bool has_data = false;
float x = 0.;
float y = 0.;
float x;
float y;
optional_point_t lerp (optional_point_t p, float t)
{ return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); }
optional_point_t mid (optional_point_t p)
{ return optional_point_t ((x + p.x) * 0.5f, (y + p.y) * 0.5f); }
} first_oncurve, first_offcurve, first_offcurve2, last_offcurve, last_offcurve2;
path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_)
{
font = font_;
draw_session = &draw_session_;
first_oncurve = first_offcurve = first_offcurve2 = last_offcurve = last_offcurve2 = optional_point_t ();
}
path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_) :
font (font_), draw_session (&draw_session_) {}
/* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287
See also:
@@ -41,6 +37,7 @@ struct path_builder_t
* https://stackoverflow.com/a/20772557
*
* Cubic support added. */
HB_ALWAYS_INLINE
void consume_point (const contour_point_t &point)
{
bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE;
@@ -50,7 +47,7 @@ struct path_builder_t
bool is_cubic = !is_on_curve && (point.flag & glyf_impl::SimpleGlyph::FLAG_CUBIC);
#endif
optional_point_t p (font->em_fscalef_x (point.x), font->em_fscalef_y (point.y));
if (!first_oncurve)
if (unlikely (!first_oncurve))
{
if (is_on_curve)
{
@@ -66,7 +63,7 @@ struct path_builder_t
}
else if (first_offcurve)
{
optional_point_t mid = first_offcurve.lerp (p, .5f);
optional_point_t mid = first_offcurve.mid (p);
first_oncurve = mid;
last_offcurve = p;
draw_session->move_to (mid.x, mid.y);
@@ -102,7 +99,7 @@ struct path_builder_t
}
else
{
optional_point_t mid = last_offcurve.lerp (p, .5f);
optional_point_t mid = last_offcurve.mid (p);
if (is_cubic)
{
@@ -127,13 +124,13 @@ struct path_builder_t
}
}
if (point.is_end_point)
if (unlikely (point.is_end_point))
{
if (first_offcurve && last_offcurve)
{
optional_point_t mid = last_offcurve.lerp (first_offcurve2 ?
first_offcurve2 :
first_offcurve, .5f);
optional_point_t mid = last_offcurve.mid (first_offcurve2 ?
first_offcurve2 :
first_offcurve);
if (last_offcurve2)
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
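
The lerp (p, .5f) to mid (p) change above exploits a TrueType rule: between two consecutive off-curve points there is an implied on-curve point exactly at their midpoint, so no general interpolation is needed. The arithmetic:

/* Sketch of the implied on-curve midpoint used throughout path_builder_t. */
struct pt_t { float x, y; };

static pt_t mid_point (pt_t a, pt_t b)
{
  return { (a.x + b.x) * 0.5f, (a.y + b.y) * 0.5f };
}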

Some files were not shown because too many files have changed in this diff.