summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTor Lillqvist <tml@iki.fi>2012-08-28 13:56:55 +0300
committerTor Lillqvist <tml@iki.fi>2012-09-24 17:00:01 +0300
commite8037c7074dce14cd31c93d2f8c5d07a3a753a0c (patch)
tree67c7dea7573cf028d19f0303dc726a215caab335
parent99df35a621aab3b4b92360bb2cab79aca53c25c6 (diff)
Import the "faulty.lib" linker for Android
From https://github.com/glandium/faulty.lib at commit 5afe2a7377c5c35c67c3c2a74c403b5ca64cf3b3 . Renamed .cpp files to .cxx to match gbuild's expectations.
-rw-r--r--sal/Library_lo-bootstrap.mk9
-rw-r--r--sal/android/faulty.lib/linker/CustomElf.cxx733
-rw-r--r--sal/android/faulty.lib/linker/CustomElf.h191
-rw-r--r--sal/android/faulty.lib/linker/ElfLoader.cxx723
-rw-r--r--sal/android/faulty.lib/linker/ElfLoader.h468
-rw-r--r--sal/android/faulty.lib/linker/Elfxx.h236
-rw-r--r--sal/android/faulty.lib/linker/Logging.h22
-rw-r--r--sal/android/faulty.lib/linker/Mappable.cxx519
-rw-r--r--sal/android/faulty.lib/linker/Mappable.h248
-rw-r--r--sal/android/faulty.lib/linker/SeekableZStream.cxx100
-rw-r--r--sal/android/faulty.lib/linker/SeekableZStream.h102
-rw-r--r--sal/android/faulty.lib/linker/Utils.h566
-rw-r--r--sal/android/faulty.lib/linker/Zip.cxx180
-rw-r--r--sal/android/faulty.lib/linker/Zip.h336
-rw-r--r--sal/android/faulty.lib/mozilla/Assertions.h376
-rw-r--r--sal/android/faulty.lib/mozilla/Attributes.h322
-rw-r--r--sal/android/faulty.lib/mozilla/RefPtr.h406
17 files changed, 5537 insertions, 0 deletions
diff --git a/sal/Library_lo-bootstrap.mk b/sal/Library_lo-bootstrap.mk
index dea6bb4ab30c..b799192604e1 100644
--- a/sal/Library_lo-bootstrap.mk
+++ b/sal/Library_lo-bootstrap.mk
@@ -37,8 +37,17 @@ $(eval $(call gb_Library_add_cobjects,lo-bootstrap,\
sal/android/lo-bootstrap \
))
+$(eval $(call gb_Library_add_cxxobjects,lo-bootstrap,\
+ sal/android/faulty.lib/linker/CustomElf \
+ sal/android/faulty.lib/linker/ElfLoader \
+ sal/android/faulty.lib/linker/Mappable \
+ sal/android/faulty.lib/linker/SeekableZStream \
+ sal/android/faulty.lib/linker/Zip \
+))
+
$(eval $(call gb_Library_set_include,lo-bootstrap,\
$$(INCLUDE) \
+ -I$(SRCDIR)/sal/android/faulty.lib \
-I$(SRCDIR)/sal/inc \
))
diff --git a/sal/android/faulty.lib/linker/CustomElf.cxx b/sal/android/faulty.lib/linker/CustomElf.cxx
new file mode 100644
index 000000000000..d2c41b1d27db
--- /dev/null
+++ b/sal/android/faulty.lib/linker/CustomElf.cxx
@@ -0,0 +1,733 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstring>
+#include <sys/mman.h>
+#include <vector>
+#include <dlfcn.h>
+#include "CustomElf.h"
+#include "Mappable.h"
+#include "Logging.h"
+
+using namespace Elf;
+using namespace mozilla;
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (~ (PAGE_SIZE - 1))
+#endif
+
+/* TODO: Fill ElfLoader::Singleton.lastError on errors. */
+
+const Ehdr *Ehdr::validate(const void *buf)
+{
+ if (!buf || buf == MAP_FAILED)
+ return NULL;
+
+ const Ehdr *ehdr = reinterpret_cast<const Ehdr *>(buf);
+
+ /* Only support ELF executables or libraries for the host system */
+ if (memcmp(ELFMAG, &ehdr->e_ident, SELFMAG) ||
+ ehdr->e_ident[EI_CLASS] != ELFCLASS ||
+ ehdr->e_ident[EI_DATA] != ELFDATA ||
+ ehdr->e_ident[EI_VERSION] != 1 ||
+ (ehdr->e_ident[EI_OSABI] != ELFOSABI && ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE) ||
+#ifdef EI_ABIVERSION
+ ehdr->e_ident[EI_ABIVERSION] != ELFABIVERSION ||
+#endif
+ (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ||
+ ehdr->e_machine != ELFMACHINE ||
+ ehdr->e_version != 1 ||
+ ehdr->e_phentsize != sizeof(Phdr))
+ return NULL;
+
+ return ehdr;
+}
+
+namespace {
+
+void debug_phdr(const char *type, const Phdr *phdr)
+{
+ debug("%s @0x%08" PRIxAddr " ("
+ "filesz: 0x%08" PRIxAddr ", "
+ "memsz: 0x%08" PRIxAddr ", "
+ "offset: 0x%08" PRIxAddr ", "
+ "flags: %c%c%c)",
+ type, phdr->p_vaddr, phdr->p_filesz, phdr->p_memsz,
+ phdr->p_offset, phdr->p_flags & PF_R ? 'r' : '-',
+ phdr->p_flags & PF_W ? 'w' : '-', phdr->p_flags & PF_X ? 'x' : '-');
+}
+
+} /* anonymous namespace */
+
+/**
+ * RAII wrapper for a mapping of the first page off a Mappable object.
+ * This calls Mappable::munmap instead of system munmap.
+ */
+class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
+public:
+ Mappable1stPagePtr(Mappable *mappable)
+ : GenericMappedPtr<Mappable1stPagePtr>(
+ mappable->mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0), PAGE_SIZE)
+ , mappable(mappable)
+ {
+ /* Ensure the content of this page */
+ mappable->ensure(*this);
+ }
+
+private:
+ friend class GenericMappedPtr<Mappable1stPagePtr>;
+ void munmap(void *buf, size_t length) {
+ mappable->munmap(buf, length);
+ }
+
+ Mappable *mappable;
+};
+
+
+TemporaryRef<LibHandle>
+CustomElf::Load(Mappable *mappable, const char *path, int flags)
+{
+ debug("CustomElf::Load(\"%s\", %x) = ...", path, flags);
+ if (!mappable)
+ return NULL;
+ /* Keeping a RefPtr of the CustomElf is going to free the appropriate
+ * resources when returning NULL */
+ RefPtr<CustomElf> elf = new CustomElf(mappable, path);
+ /* Map the first page of the Elf object to access Elf and program headers */
+ Mappable1stPagePtr ehdr_raw(mappable);
+ if (ehdr_raw == MAP_FAILED)
+ return NULL;
+
+ const Ehdr *ehdr = Ehdr::validate(ehdr_raw);
+ if (!ehdr)
+ return NULL;
+
+ /* Scan Elf Program Headers and gather some information about them */
+ std::vector<const Phdr *> pt_loads;
+ Addr min_vaddr = (Addr) -1; // We want to find the lowest and biggest
+ Addr max_vaddr = 0; // virtual address used by this Elf.
+ const Phdr *dyn = NULL;
+
+ const Phdr *first_phdr = reinterpret_cast<const Phdr *>(
+ reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
+ const Phdr *end_phdr = &first_phdr[ehdr->e_phnum];
+
+ for (const Phdr *phdr = first_phdr; phdr < end_phdr; phdr++) {
+ switch (phdr->p_type) {
+ case PT_LOAD:
+ debug_phdr("PT_LOAD", phdr);
+ pt_loads.push_back(phdr);
+ if (phdr->p_vaddr < min_vaddr)
+ min_vaddr = phdr->p_vaddr;
+ if (max_vaddr < phdr->p_vaddr + phdr->p_memsz)
+ max_vaddr = phdr->p_vaddr + phdr->p_memsz;
+ break;
+ case PT_DYNAMIC:
+ debug_phdr("PT_DYNAMIC", phdr);
+ if (!dyn) {
+ dyn = phdr;
+ } else {
+ log("%s: Multiple PT_DYNAMIC segments detected", elf->GetPath());
+ return NULL;
+ }
+ break;
+ case PT_TLS:
+ debug_phdr("PT_TLS", phdr);
+ if (phdr->p_memsz) {
+ log("%s: TLS is not supported", elf->GetPath());
+ return NULL;
+ }
+ break;
+ case PT_GNU_STACK:
+ debug_phdr("PT_GNU_STACK", phdr);
+// Skip on Android until bug 706116 is fixed
+#ifndef ANDROID
+ if (phdr->p_flags & PF_X) {
+ log("%s: Executable stack is not supported", elf->GetPath());
+ return NULL;
+ }
+#endif
+ break;
+ default:
+ debug("%s: Warning: program header type #%d not handled",
+ elf->GetPath(), phdr->p_type);
+ }
+ }
+
+ if (min_vaddr != 0) {
+ log("%s: Unsupported minimal virtual address: 0x%08" PRIxAddr,
+ elf->GetPath(), min_vaddr);
+ return NULL;
+ }
+ if (!dyn) {
+ log("%s: No PT_DYNAMIC segment found", elf->GetPath());
+ return NULL;
+ }
+
+ /* Reserve enough memory to map the complete virtual address space for this
+ * library.
+ * As we are using the base address from here to mmap something else with
+ * MAP_FIXED | MAP_SHARED, we need to make sure these mmaps will work. For
+ * instance, on armv6, MAP_SHARED mappings require a 16k alignment, but mmap
+ * MAP_PRIVATE only returns a 4k aligned address. So we first get a base
+ * address with MAP_SHARED, which guarantees the kernel returns an address
+ * that we'll be able to use with MAP_FIXED, and then remap MAP_PRIVATE at
+ * the same address, because of some bad side effects of keeping it as
+ * MAP_SHARED. */
+ elf->base.Assign(mmap(NULL, max_vaddr, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS,
+ -1, 0), max_vaddr);
+ if ((elf->base == MAP_FAILED) ||
+ (mmap(elf->base, max_vaddr, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != elf->base)) {
+ log("%s: Failed to mmap", elf->GetPath());
+ return NULL;
+ }
+
+ /* Load and initialize library */
+ for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
+ it < pt_loads.end(); ++it)
+ if (!elf->LoadSegment(*it))
+ return NULL;
+
+ /* We're not going to mmap anymore */
+ mappable->finalize();
+
+ elf->l_addr = elf->base;
+ elf->l_name = elf->GetPath();
+ elf->l_ld = elf->GetPtr<Dyn>(dyn->p_vaddr);
+ ElfLoader::Singleton.Register(elf);
+
+ if (!elf->InitDyn(dyn))
+ return NULL;
+
+ elf->stats("oneLibLoaded");
+ debug("CustomElf::Load(\"%s\", %x) = %p", path, flags,
+ static_cast<void *>(elf));
+ return elf;
+}
+
+CustomElf::~CustomElf()
+{
+ debug("CustomElf::~CustomElf(%p [\"%s\"])",
+ reinterpret_cast<void *>(this), GetPath());
+ CallFini();
+ /* Normally, __cxa_finalize is called by the .fini function. However,
+ * Android NDK before r6b doesn't do that. Our wrapped cxa_finalize only
+ * calls destructors once, so call it in all cases. */
+ ElfLoader::__wrap_cxa_finalize(this);
+ delete mappable;
+ ElfLoader::Singleton.Forget(this);
+}
+
+namespace {
+
+/**
+ * Hash function for symbol lookup, as defined in ELF standard for System V
+ */
+unsigned long
+ElfHash(const char *symbol)
+{
+ const unsigned char *sym = reinterpret_cast<const unsigned char *>(symbol);
+ unsigned long h = 0, g;
+ while (*sym) {
+ h = (h << 4) + *sym++;
+ if ((g = h & 0xf0000000))
+ h ^= g >> 24;
+ h &= ~g;
+ }
+ return h;
+}
+
+} /* anonymous namespace */
+
+void *
+CustomElf::GetSymbolPtr(const char *symbol) const
+{
+ return GetSymbolPtr(symbol, ElfHash(symbol));
+}
+
+void *
+CustomElf::GetSymbolPtr(const char *symbol, unsigned long hash) const
+{
+ const Sym *sym = GetSymbol(symbol, hash);
+ void *ptr = NULL;
+ if (sym && sym->st_shndx != SHN_UNDEF)
+ ptr = GetPtr(sym->st_value);
+ debug("CustomElf::GetSymbolPtr(%p [\"%s\"], \"%s\") = %p",
+ reinterpret_cast<const void *>(this), GetPath(), symbol, ptr);
+ return ptr;
+}
+
+void *
+CustomElf::GetSymbolPtrInDeps(const char *symbol) const
+{
+ /* Resolve dlopen and related functions to point to ours */
+ if (symbol[0] == 'd' && symbol[1] == 'l') {
+ if (strcmp(symbol + 2, "open") == 0)
+ return FunctionPtr(__wrap_dlopen);
+ if (strcmp(symbol + 2, "error") == 0)
+ return FunctionPtr(__wrap_dlerror);
+ if (strcmp(symbol + 2, "close") == 0)
+ return FunctionPtr(__wrap_dlclose);
+ if (strcmp(symbol + 2, "sym") == 0)
+ return FunctionPtr(__wrap_dlsym);
+ if (strcmp(symbol + 2, "addr") == 0)
+ return FunctionPtr(__wrap_dladdr);
+ if (strcmp(symbol + 2, "_iterate_phdr") == 0)
+ return FunctionPtr(__wrap_dl_iterate_phdr);
+ } else if (symbol[0] == '_' && symbol[1] == '_') {
+ /* Resolve a few C++ ABI specific functions to point to ours */
+#ifdef __ARM_EABI__
+ if (strcmp(symbol + 2, "aeabi_atexit") == 0)
+ return FunctionPtr(&ElfLoader::__wrap_aeabi_atexit);
+#else
+ if (strcmp(symbol + 2, "cxa_atexit") == 0)
+ return FunctionPtr(&ElfLoader::__wrap_cxa_atexit);
+#endif
+ if (strcmp(symbol + 2, "cxa_finalize") == 0)
+ return FunctionPtr(&ElfLoader::__wrap_cxa_finalize);
+ if (strcmp(symbol + 2, "dso_handle") == 0)
+ return const_cast<CustomElf *>(this);
+ if (strcmp(symbol + 2, "moz_linker_stats") == 0)
+ return FunctionPtr(&ElfLoader::stats);
+ } else if (symbol[0] == 's' && symbol[1] == 'i') {
+ if (strcmp(symbol + 2, "gnal") == 0)
+ return FunctionPtr(__wrap_signal);
+ if (strcmp(symbol + 2, "gaction") == 0)
+ return FunctionPtr(__wrap_sigaction);
+ }
+
+ void *sym;
+ /* Search the symbol in the main program. Note this also tries all libraries
+   * the system linker will have loaded with RTLD_GLOBAL. Unfortunately, that doesn't
+ * work with bionic, but its linker doesn't normally search the main binary
+ * anyways. Moreover, on android, the main binary is dalvik. */
+#ifdef __GLIBC__
+ sym = dlsym(RTLD_DEFAULT, symbol);
+ debug("dlsym(RTLD_DEFAULT, \"%s\") = %p", symbol, sym);
+ if (sym)
+ return sym;
+#endif
+
+ /* Then search the symbol in our dependencies. Since we already searched in
+ * libraries the system linker loaded, skip those (on glibc systems). We
+ * also assume the symbol is to be found in one of the dependent libraries
+ * directly, not in their own dependent libraries. Building libraries with
+ * --no-allow-shlib-undefined ensures such indirect symbol dependency don't
+ * happen. */
+ unsigned long hash = ElfHash(symbol);
+ for (std::vector<RefPtr<LibHandle> >::const_iterator it = dependencies.begin();
+ it < dependencies.end(); ++it) {
+ if (!(*it)->IsSystemElf()) {
+ sym = reinterpret_cast<CustomElf *>((*it).get())->GetSymbolPtr(symbol, hash);
+#ifndef __GLIBC__
+ } else {
+ sym = (*it)->GetSymbolPtr(symbol);
+#endif
+ }
+ if (sym)
+ return sym;
+ }
+ return NULL;
+}
+
+const Sym *
+CustomElf::GetSymbol(const char *symbol, unsigned long hash) const
+{
+ /* Search symbol with the buckets and chains tables.
+ * The hash computed from the symbol name gives an index in the buckets
+ * table. The corresponding value in the bucket table is an index in the
+ * symbols table and in the chains table.
+ * If the corresponding symbol in the symbols table matches, we're done.
+ * Otherwise, the corresponding value in the chains table is a new index
+ * in both tables, which corresponding symbol is tested and so on and so
+ * forth */
+ size_t bucket = hash % buckets.numElements();
+ for (size_t y = buckets[bucket]; y != STN_UNDEF; y = chains[y]) {
+ if (strcmp(symbol, strtab.GetStringAt(symtab[y].st_name)))
+ continue;
+ return &symtab[y];
+ }
+ return NULL;
+}
+
+bool
+CustomElf::Contains(void *addr) const
+{
+ return base.Contains(addr);
+}
+
+void
+CustomElf::stats(const char *when) const
+{
+ mappable->stats(when, GetPath());
+}
+
+bool
+CustomElf::LoadSegment(const Phdr *pt_load) const
+{
+  if (pt_load->p_type != PT_LOAD) {
+    debug("%s: Elf::LoadSegment only takes PT_LOAD program headers", GetPath());
+    return false;
+  }
+
+  int prot = ((pt_load->p_flags & PF_X) ? PROT_EXEC : 0) |
+             ((pt_load->p_flags & PF_W) ? PROT_WRITE : 0) |
+             ((pt_load->p_flags & PF_R) ? PROT_READ : 0);
+
+  /* Mmap at page boundary */
+  Addr align = PAGE_SIZE;
+  void *mapped, *where;
+  do {
+    Addr align_offset = pt_load->p_vaddr & (align - 1);
+    where = GetPtr(pt_load->p_vaddr - align_offset);
+    debug("%s: Loading segment @%p %c%c%c", GetPath(), where,
+          prot & PROT_READ ? 'r' : '-',
+          prot & PROT_WRITE ? 'w' : '-',
+          prot & PROT_EXEC ? 'x' : '-');
+    mapped = mappable->mmap(where, pt_load->p_filesz + align_offset,
+                            prot, MAP_PRIVATE | MAP_FIXED,
+                            pt_load->p_offset - align_offset);
+    if ((mapped != MAP_FAILED) || (pt_load->p_vaddr == 0) ||
+        (pt_load->p_align == align))
+      break;
+    /* The virtual address space for the library is properly aligned at
+     * 16k on ARMv6 (see CustomElf::Load), and so is the first segment
+     * (p_vaddr == 0). But subsequent segments may not be 16k aligned
+     * and fail to mmap. In such case, try to mmap again at the p_align
+     * boundary instead of page boundary. */
+    debug("%s: Failed to mmap, retrying", GetPath());
+    align = pt_load->p_align;
+  } while (1);
+
+  if (mapped != where) {
+    if (mapped == MAP_FAILED) {
+      log("%s: Failed to mmap", GetPath());
+    } else {
+      log("%s: Didn't map at the expected location (wanted: %p, got: %p)",
+          GetPath(), where, mapped);
+    }
+    return false;
+  }
+
+  /* When p_memsz is greater than p_filesz, we need to have nulled out memory
+   * after p_filesz and before p_memsz.
+   * Mappable::mmap already guarantees that after p_filesz and up to the end
+   * of the page p_filesz is in, memory is nulled out.
+   * Above the end of that page, and up to p_memsz, we already have nulled out
+   * memory because we mapped anonymous memory on the whole library virtual
+   * address space. We just need to adjust this anonymous memory protection
+   * flags. */
+  if (pt_load->p_memsz > pt_load->p_filesz) {
+    Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
+    Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
+    Addr next_page = (file_end & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
+    if (mem_end > next_page) {
+      if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
+        log("%s: Failed to mprotect", GetPath());
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+namespace {
+
+void debug_dyn(const char *type, const Dyn *dyn)
+{
+ debug("%s 0x%08" PRIxAddr, type, dyn->d_un.d_val);
+}
+
+} /* anonymous namespace */
+
+bool
+CustomElf::InitDyn(const Phdr *pt_dyn)
+{
+ /* Scan PT_DYNAMIC segment and gather some information */
+ const Dyn *first_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr);
+ const Dyn *end_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr + pt_dyn->p_filesz);
+ std::vector<Word> dt_needed;
+ size_t symnum = 0;
+ for (const Dyn *dyn = first_dyn; dyn < end_dyn && dyn->d_tag; dyn++) {
+ switch (dyn->d_tag) {
+ case DT_NEEDED:
+ debug_dyn("DT_NEEDED", dyn);
+ dt_needed.push_back(dyn->d_un.d_val);
+ break;
+ case DT_HASH:
+ {
+ debug_dyn("DT_HASH", dyn);
+ const Word *hash_table_header = GetPtr<Word>(dyn->d_un.d_ptr);
+ symnum = hash_table_header[1];
+ buckets.Init(&hash_table_header[2], hash_table_header[0]);
+ chains.Init(&*buckets.end());
+ }
+ break;
+ case DT_STRTAB:
+ debug_dyn("DT_STRTAB", dyn);
+ strtab.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_SYMTAB:
+ debug_dyn("DT_SYMTAB", dyn);
+ symtab.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_SYMENT:
+ debug_dyn("DT_SYMENT", dyn);
+ if (dyn->d_un.d_val != sizeof(Sym)) {
+ log("%s: Unsupported DT_SYMENT", GetPath());
+ return false;
+ }
+ break;
+ case DT_TEXTREL:
+ log("%s: Text relocations are not supported", GetPath());
+ return false;
+ case DT_STRSZ: /* Ignored */
+ debug_dyn("DT_STRSZ", dyn);
+ break;
+ case UNSUPPORTED_RELOC():
+ case UNSUPPORTED_RELOC(SZ):
+ case UNSUPPORTED_RELOC(ENT):
+ log("%s: Unsupported relocations", GetPath());
+ return false;
+ case RELOC():
+ debug_dyn(STR_RELOC(), dyn);
+ relocations.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case RELOC(SZ):
+ debug_dyn(STR_RELOC(SZ), dyn);
+ relocations.InitSize(dyn->d_un.d_val);
+ break;
+ case RELOC(ENT):
+ debug_dyn(STR_RELOC(ENT), dyn);
+ if (dyn->d_un.d_val != sizeof(Reloc)) {
+ log("%s: Unsupported DT_RELENT", GetPath());
+ return false;
+ }
+ break;
+ case DT_JMPREL:
+ debug_dyn("DT_JMPREL", dyn);
+ jumprels.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_PLTRELSZ:
+ debug_dyn("DT_PLTRELSZ", dyn);
+ jumprels.InitSize(dyn->d_un.d_val);
+ break;
+ case DT_PLTGOT:
+ debug_dyn("DT_PLTGOT", dyn);
+ break;
+ case DT_INIT:
+ debug_dyn("DT_INIT", dyn);
+ init = dyn->d_un.d_ptr;
+ break;
+ case DT_INIT_ARRAY:
+ debug_dyn("DT_INIT_ARRAY", dyn);
+ init_array.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_INIT_ARRAYSZ:
+ debug_dyn("DT_INIT_ARRAYSZ", dyn);
+ init_array.InitSize(dyn->d_un.d_val);
+ break;
+ case DT_FINI:
+ debug_dyn("DT_FINI", dyn);
+ fini = dyn->d_un.d_ptr;
+ break;
+ case DT_FINI_ARRAY:
+ debug_dyn("DT_FINI_ARRAY", dyn);
+ fini_array.Init(GetPtr(dyn->d_un.d_ptr));
+ break;
+ case DT_FINI_ARRAYSZ:
+ debug_dyn("DT_FINI_ARRAYSZ", dyn);
+ fini_array.InitSize(dyn->d_un.d_val);
+ break;
+ case DT_PLTREL:
+ if (dyn->d_un.d_val != RELOC()) {
+ log("%s: Error: DT_PLTREL is not " STR_RELOC(), GetPath());
+ return false;
+ }
+ break;
+ case DT_FLAGS:
+ {
+ Word flags = dyn->d_un.d_val;
+ /* Treat as a DT_TEXTREL tag */
+ if (flags & DF_TEXTREL) {
+ log("%s: Text relocations are not supported", GetPath());
+ return false;
+ }
+ /* we can treat this like having a DT_SYMBOLIC tag */
+ flags &= ~DF_SYMBOLIC;
+ if (flags)
+ log("%s: Warning: unhandled flags #%" PRIxAddr" not handled",
+ GetPath(), flags);
+ }
+ break;
+ case DT_SONAME: /* Should match GetName(), but doesn't matter */
+ case DT_SYMBOLIC: /* Indicates internal symbols should be looked up in
+ * the library itself first instead of the executable,
+ * which is actually what this linker does by default */
+ case RELOC(COUNT): /* Indicates how many relocations are relative, which
+ * is usually used to skip relocations on prelinked
+ * libraries. They are not supported anyways. */
+ case UNSUPPORTED_RELOC(COUNT): /* This should error out, but it doesn't
+ * really matter. */
+ case DT_VERSYM: /* DT_VER* entries are used for symbol versioning, which */
+ case DT_VERDEF: /* this linker doesn't support yet. */
+ case DT_VERDEFNUM:
+ case DT_VERNEED:
+ case DT_VERNEEDNUM:
+ /* Ignored */
+ break;
+ default:
+ log("%s: Warning: dynamic header type #%" PRIxAddr" not handled",
+ GetPath(), dyn->d_tag);
+ }
+ }
+
+ if (!buckets || !symnum) {
+ log("%s: Missing or broken DT_HASH", GetPath());
+ return false;
+ }
+ if (!strtab) {
+ log("%s: Missing DT_STRTAB", GetPath());
+ return false;
+ }
+ if (!symtab) {
+ log("%s: Missing DT_SYMTAB", GetPath());
+ return false;
+ }
+
+ /* Load dependent libraries */
+ for (size_t i = 0; i < dt_needed.size(); i++) {
+ const char *name = strtab.GetStringAt(dt_needed[i]);
+ RefPtr<LibHandle> handle =
+ ElfLoader::Singleton.Load(name, RTLD_GLOBAL | RTLD_LAZY, this);
+ if (!handle)
+ return false;
+ dependencies.push_back(handle);
+ }
+
+ /* Finish initialization */
+ return Relocate() && RelocateJumps() && CallInit();
+}
+
+bool
+CustomElf::Relocate()
+{
+ debug("Relocate %s @%p", GetPath(), static_cast<void *>(base));
+ for (Array<Reloc>::iterator rel = relocations.begin();
+ rel < relocations.end(); ++rel) {
+ /* Location of the relocation */
+ void *ptr = GetPtr(rel->r_offset);
+
+ /* R_*_RELATIVE relocations apply directly at the given location */
+ if (ELF_R_TYPE(rel->r_info) == R_RELATIVE) {
+ *(void **) ptr = GetPtr(rel->GetAddend(base));
+ continue;
+ }
+ /* Other relocation types need a symbol resolution */
+ const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
+ void *symptr;
+ if (sym.st_shndx != SHN_UNDEF) {
+ symptr = GetPtr(sym.st_value);
+ } else {
+ /* TODO: avoid symbol resolution when it's the same symbol as last
+ * iteration */
+ /* TODO: handle symbol resolving to NULL vs. being undefined. */
+ symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
+ }
+
+ if (symptr == NULL)
+ log("%s: Warning: relocation to NULL @0x%08" PRIxAddr,
+ GetPath(), rel->r_offset);
+
+ /* Apply relocation */
+ switch (ELF_R_TYPE(rel->r_info)) {
+ case R_GLOB_DAT:
+ /* R_*_GLOB_DAT relocations simply use the symbol value */
+ *(void **) ptr = symptr;
+ break;
+ case R_ABS:
+      /* R_*_ABS* relocations add the addend to the symbol value */
+ *(const char **) ptr = (const char *)symptr + rel->GetAddend(base);
+ break;
+ default:
+ log("%s: Unsupported relocation type: 0x%" PRIxAddr,
+ GetPath(), ELF_R_TYPE(rel->r_info));
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+CustomElf::RelocateJumps()
+{
+ /* TODO: Dynamic symbol resolution */
+ for (Array<Reloc>::iterator rel = jumprels.begin();
+ rel < jumprels.end(); ++rel) {
+ /* Location of the relocation */
+ void *ptr = GetPtr(rel->r_offset);
+
+ /* Only R_*_JMP_SLOT relocations are expected */
+ if (ELF_R_TYPE(rel->r_info) != R_JMP_SLOT) {
+ log("%s: Jump relocation type mismatch", GetPath());
+ return false;
+ }
+
+ /* TODO: Avoid code duplication with the relocations above */
+ const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
+ void *symptr;
+ if (sym.st_shndx != SHN_UNDEF)
+ symptr = GetPtr(sym.st_value);
+ else
+ symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
+
+ if (symptr == NULL) {
+ log("%s: %s: relocation to NULL @0x%08" PRIxAddr " for symbol \"%s\"",
+ GetPath(),
+ (ELF_ST_BIND(sym.st_info) == STB_WEAK) ? "Warning" : "Error",
+ rel->r_offset, strtab.GetStringAt(sym.st_name));
+ if (ELF_ST_BIND(sym.st_info) != STB_WEAK)
+ return false;
+ }
+ /* Apply relocation */
+ *(void **) ptr = symptr;
+ }
+ return true;
+}
+
+bool
+CustomElf::CallInit()
+{
+ if (init)
+ CallFunction(init);
+
+ for (Array<void *>::iterator it = init_array.begin();
+ it < init_array.end(); ++it) {
+ /* Android x86 NDK wrongly puts 0xffffffff in INIT_ARRAY */
+ if (*it && *it != reinterpret_cast<void *>(-1))
+ CallFunction(*it);
+ }
+ initialized = true;
+ return true;
+}
+
+void
+CustomElf::CallFini()
+{
+ if (!initialized)
+ return;
+ for (Array<void *>::reverse_iterator it = fini_array.rbegin();
+ it < fini_array.rend(); ++it) {
+ /* Android x86 NDK wrongly puts 0xffffffff in FINI_ARRAY */
+ if (*it && *it != reinterpret_cast<void *>(-1))
+ CallFunction(*it);
+ }
+ if (fini)
+ CallFunction(fini);
+}
diff --git a/sal/android/faulty.lib/linker/CustomElf.h b/sal/android/faulty.lib/linker/CustomElf.h
new file mode 100644
index 000000000000..74f789b27c3f
--- /dev/null
+++ b/sal/android/faulty.lib/linker/CustomElf.h
@@ -0,0 +1,191 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef CustomElf_h
+#define CustomElf_h
+
+#include "ElfLoader.h"
+#include "Logging.h"
+#include "Elfxx.h"
+
+class Mappable;
+
+/**
+ * Library Handle class for ELF libraries we don't let the system linker
+ * handle.
+ */
+class CustomElf: public LibHandle, private ElfLoader::link_map
+{
+ friend class ElfLoader;
+ friend class SEGVHandler;
+public:
+ /**
+   * Returns a new CustomElf using the given Mappable to map ELF
+   * content. Ownership of the Mappable is taken, and it will be deleted
+   * in CustomElf's destructor if an instance is created, or by the Load
+   * method otherwise. The path identifies the library, and flags
+ * are the same kind of flags that would be given to dlopen(), though
+ * currently, none are supported and the behaviour is more or less that of
+ * RTLD_GLOBAL | RTLD_BIND_NOW.
+ */
+ static mozilla::TemporaryRef<LibHandle> Load(Mappable *mappable,
+ const char *path, int flags);
+
+ /**
+ * Inherited from LibHandle
+ */
+ virtual ~CustomElf();
+ virtual void *GetSymbolPtr(const char *symbol) const;
+ virtual bool Contains(void *addr) const;
+
+ /**
+ * Shows some stats about the Mappable instance. The when argument is to be
+ * used by the caller to give an identifier of the when the stats call is
+ * made.
+ */
+ void stats(const char *when) const;
+
+private:
+ /**
+ * Returns a pointer to the Elf Symbol in the Dynamic Symbol table
+ * corresponding to the given symbol name (with a pre-computed hash).
+ */
+ const Elf::Sym *GetSymbol(const char *symbol, unsigned long hash) const;
+
+ /**
+ * Returns the address corresponding to the given symbol name (with a
+ * pre-computed hash).
+ */
+ void *GetSymbolPtr(const char *symbol, unsigned long hash) const;
+
+ /**
+ * Scan dependent libraries to find the address corresponding to the
+ * given symbol name. This is used to find symbols that are undefined
+ * in the Elf object.
+ */
+ void *GetSymbolPtrInDeps(const char *symbol) const;
+
+ /**
+ * Private constructor
+ */
+ CustomElf(Mappable *mappable, const char *path)
+ : LibHandle(path), mappable(mappable), init(0), fini(0), initialized(false)
+ { }
+
+ /**
+ * Returns a pointer relative to the base address where the library is
+ * loaded.
+ */
+ void *GetPtr(const Elf::Addr offset) const
+ {
+ return base + offset;
+ }
+
+ /**
+ * Like the above, but returns a typed (const) pointer
+ */
+ template <typename T>
+ const T *GetPtr(const Elf::Addr offset) const
+ {
+ return reinterpret_cast<const T *>(base + offset);
+ }
+
+ /**
+ * Loads an Elf segment defined by the given PT_LOAD header.
+ * Returns whether this succeeded or failed.
+ */
+ bool LoadSegment(const Elf::Phdr *pt_load) const;
+
+ /**
+ * Initializes the library according to information found in the given
+ * PT_DYNAMIC header.
+ * Returns whether this succeeded or failed.
+ */
+ bool InitDyn(const Elf::Phdr *pt_dyn);
+
+ /**
+ * Apply .rel.dyn/.rela.dyn relocations.
+ * Returns whether this succeeded or failed.
+ */
+ bool Relocate();
+
+ /**
+ * Apply .rel.plt/.rela.plt relocations.
+ * Returns whether this succeeded or failed.
+ */
+ bool RelocateJumps();
+
+ /**
+ * Call initialization functions (.init/.init_array)
+   * Always returns true.
+ */
+ bool CallInit();
+
+ /**
+ * Call destructor functions (.fini_array/.fini)
+   * Does nothing unless initialization (CallInit) previously ran.
+ */
+ void CallFini();
+
+ /**
+ * Call a function given a pointer to its location.
+ */
+ void CallFunction(void *ptr) const
+ {
+ /* C++ doesn't allow direct conversion between pointer-to-object
+ * and pointer-to-function. */
+ union {
+ void *ptr;
+ void (*func)(void);
+ } f;
+ f.ptr = ptr;
+ debug("%s: Calling function @%p", GetPath(), ptr);
+ f.func();
+ }
+
+ /**
+   * Call a function given an address relative to the library base
+ */
+ void CallFunction(Elf::Addr addr) const
+ {
+ return CallFunction(GetPtr(addr));
+ }
+
+ /* Appropriated Mappable */
+ Mappable *mappable;
+
+ /* Base address where the library is loaded */
+ MappedPtr base;
+
+ /* String table */
+ Elf::Strtab strtab;
+
+ /* Symbol table */
+ UnsizedArray<Elf::Sym> symtab;
+
+ /* Buckets and chains for the System V symbol hash table */
+ Array<Elf::Word> buckets;
+ UnsizedArray<Elf::Word> chains;
+
+ /* List of dependent libraries */
+ std::vector<mozilla::RefPtr<LibHandle> > dependencies;
+
+ /* List of .rel.dyn/.rela.dyn relocations */
+ Array<Elf::Reloc> relocations;
+
+ /* List of .rel.plt/.rela.plt relocation */
+ Array<Elf::Reloc> jumprels;
+
+ /* Relative address of the initialization and destruction functions
+ * (.init/.fini) */
+ Elf::Addr init, fini;
+
+ /* List of initialization and destruction functions
+ * (.init_array/.fini_array) */
+ Array<void *> init_array, fini_array;
+
+ bool initialized;
+};
+
+#endif /* CustomElf_h */
diff --git a/sal/android/faulty.lib/linker/ElfLoader.cxx b/sal/android/faulty.lib/linker/ElfLoader.cxx
new file mode 100644
index 000000000000..7cdb1bd48fbf
--- /dev/null
+++ b/sal/android/faulty.lib/linker/ElfLoader.cxx
@@ -0,0 +1,723 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <cstring>
+#include <cstdlib>
+#include <dlfcn.h>
+#include <unistd.h>
+#include <algorithm>
+#include <fcntl.h>
+#include "ElfLoader.h"
+#include "CustomElf.h"
+#include "Mappable.h"
+#include "Logging.h"
+
+#if defined(ANDROID) && ANDROID_VERSION < 8
+/* Android API < 8 doesn't provide sigaltstack */
+#include <sys/syscall.h>
+
+extern "C" {
+
+inline int sigaltstack(const stack_t *ss, stack_t *oss) {
+ return syscall(__NR_sigaltstack, ss, oss);
+}
+
+} /* extern "C" */
+#endif
+
+using namespace mozilla;
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (~ (PAGE_SIZE - 1))
+#endif
+
+/**
+ * dlfcn.h replacements functions
+ */
+
+void *
+__wrap_dlopen(const char *path, int flags)
+{
+ RefPtr<LibHandle> handle = ElfLoader::Singleton.Load(path, flags);
+ if (handle)
+ handle->AddDirectRef();
+ return handle;
+}
+
+const char *
+__wrap_dlerror(void)
+{
+ const char *error = ElfLoader::Singleton.lastError;
+ ElfLoader::Singleton.lastError = NULL;
+ return error;
+}
+
+void *
+__wrap_dlsym(void *handle, const char *symbol)
+{
+ if (!handle) {
+ ElfLoader::Singleton.lastError = "dlsym(NULL, sym) unsupported";
+ return NULL;
+ }
+ if (handle != RTLD_DEFAULT && handle != RTLD_NEXT) {
+ LibHandle *h = reinterpret_cast<LibHandle *>(handle);
+ return h->GetSymbolPtr(symbol);
+ }
+ return dlsym(handle, symbol);
+}
+
+int
+__wrap_dlclose(void *handle)
+{
+ if (!handle) {
+ ElfLoader::Singleton.lastError = "No handle given to dlclose()";
+ return -1;
+ }
+ reinterpret_cast<LibHandle *>(handle)->ReleaseDirectRef();
+ return 0;
+}
+
+int
+__wrap_dladdr(void *addr, Dl_info *info)
+{
+ RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(addr);
+ if (!handle)
+ return 0;
+ info->dli_fname = handle->GetPath();
+ return 1;
+}
+
+int
+__wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data)
+{
+ if (ElfLoader::Singleton.dbg == NULL)
+ return -1;
+
+ for (ElfLoader::r_debug::iterator it = ElfLoader::Singleton.dbg->begin();
+ it < ElfLoader::Singleton.dbg->end(); ++it) {
+ dl_phdr_info info;
+ info.dlpi_addr = reinterpret_cast<Elf::Addr>(it->l_addr);
+ info.dlpi_name = it->l_name;
+ info.dlpi_phdr = NULL;
+ info.dlpi_phnum = 0;
+ int ret = callback(&info, sizeof(dl_phdr_info), data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+namespace {
+
+/**
+ * Returns the part after the last '/' for the given path
+ */
+const char *
+LeafName(const char *path)
+{
+ const char *lastSlash = strrchr(path, '/');
+ if (lastSlash)
+ return lastSlash + 1;
+ return path;
+}
+
+} /* Anonymous namespace */
+
+/**
+ * LibHandle
+ */
+LibHandle::~LibHandle()
+{
+ free(path);
+}
+
+const char *
+LibHandle::GetName() const
+{
+ return path ? LeafName(path) : NULL;
+}
+
+/**
+ * SystemElf
+ */
+TemporaryRef<LibHandle>
+SystemElf::Load(const char *path, int flags)
+{
+ /* The Android linker returns a handle when the file name matches an
+ * already loaded library, even when the full path doesn't exist */
+ if (path && path[0] == '/' && (access(path, F_OK) == -1)){
+ debug("dlopen(\"%s\", %x) = %p", path, flags, (void *)NULL);
+ return NULL;
+ }
+
+ void *handle = dlopen(path, flags);
+ debug("dlopen(\"%s\", %x) = %p", path, flags, handle);
+ ElfLoader::Singleton.lastError = dlerror();
+ if (handle) {
+ SystemElf *elf = new SystemElf(path, handle);
+ ElfLoader::Singleton.Register(elf);
+ return elf;
+ }
+ return NULL;
+}
+
+SystemElf::~SystemElf()
+{
+ if (!dlhandle)
+ return;
+ debug("dlclose(%p [\"%s\"])", dlhandle, GetPath());
+ dlclose(dlhandle);
+ ElfLoader::Singleton.lastError = dlerror();
+ ElfLoader::Singleton.Forget(this);
+}
+
+void *
+SystemElf::GetSymbolPtr(const char *symbol) const
+{
+ void *sym = dlsym(dlhandle, symbol);
+ debug("dlsym(%p [\"%s\"], \"%s\") = %p", dlhandle, GetPath(), symbol, sym);
+ ElfLoader::Singleton.lastError = dlerror();
+ return sym;
+}
+
+/**
+ * ElfLoader
+ */
+
+/* Unique ElfLoader instance */
+ElfLoader ElfLoader::Singleton;
+
+TemporaryRef<LibHandle>
+ElfLoader::Load(const char *path, int flags, LibHandle *parent)
+{
+ RefPtr<LibHandle> handle;
+
+ /* Handle dlopen(NULL) directly. */
+ if (!path) {
+ handle = SystemElf::Load(NULL, flags);
+ return handle;
+ }
+
+ /* TODO: Handle relative paths correctly */
+ const char *name = LeafName(path);
+
+ /* Search the list of handles we already have for a match. When the given
+ * path is not absolute, compare file names, otherwise compare full paths. */
+ if (name == path) {
+ for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
+ if ((*it)->GetName() && (strcmp((*it)->GetName(), name) == 0))
+ return *it;
+ } else {
+ for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
+ if ((*it)->GetPath() && (strcmp((*it)->GetPath(), path) == 0))
+ return *it;
+ }
+
+ char *abs_path = NULL;
+ const char *requested_path = path;
+
+ /* When the path is not absolute and the library is being loaded for
+ * another, first try to load the library from the directory containing
+ * that parent library. */
+ if ((name == path) && parent) {
+ const char *parentPath = parent->GetPath();
+ abs_path = new char[strlen(parentPath) + strlen(path)];
+ strcpy(abs_path, parentPath);
+ char *slash = strrchr(abs_path, '/');
+ strcpy(slash + 1, path);
+ path = abs_path;
+ }
+
+ /* Create a mappable object for the given path. Paths in the form
+ * /foo/bar/baz/archive!/directory/lib.so
+ * try to load the directory/lib.so in /foo/bar/baz/archive, provided
+ * that file is a Zip archive. */
+ Mappable *mappable = NULL;
+ RefPtr<Zip> zip;
+ const char *subpath;
+ if ((subpath = strchr(path, '!'))) {
+ char *zip_path = strndup(path, subpath - path);
+ while (*(++subpath) == '/') { }
+ zip = zips.GetZip(zip_path);
+ Zip::Stream s;
+ if (zip && zip->GetStream(subpath, &s)) {
+ /* When the MOZ_LINKER_EXTRACT environment variable is set to "1",
+ * compressed libraries are going to be (temporarily) extracted as
+ * files, in the directory pointed by the MOZ_LINKER_CACHE
+ * environment variable. */
+ const char *extract = getenv("MOZ_LINKER_EXTRACT");
+ if (extract && !strncmp(extract, "1", 2 /* Including '\0' */))
+ mappable = MappableExtractFile::Create(name, zip, &s);
+ if (!mappable) {
+ if (s.GetType() == Zip::Stream::DEFLATE) {
+ mappable = MappableDeflate::Create(name, zip, &s);
+ } else if (s.GetType() == Zip::Stream::STORE) {
+ mappable = MappableSeekableZStream::Create(name, zip, &s);
+ }
+ }
+ }
+ }
+ /* If we couldn't load above, try with a MappableFile */
+ if (!mappable && !zip)
+ mappable = MappableFile::Create(path);
+
+ /* Try loading with the custom linker if we have a Mappable */
+ if (mappable)
+ handle = CustomElf::Load(mappable, path, flags);
+
+ /* Try loading with the system linker if everything above failed */
+ if (!handle)
+ handle = SystemElf::Load(path, flags);
+
+ /* If we didn't have an absolute path and haven't been able to load
+ * a library yet, try in the system search path */
+ if (!handle && abs_path)
+ handle = SystemElf::Load(name, flags);
+
+ delete [] abs_path;
+ debug("ElfLoader::Load(\"%s\", 0x%x, %p [\"%s\"]) = %p", requested_path, flags,
+ reinterpret_cast<void *>(parent), parent ? parent->GetPath() : "",
+ static_cast<void *>(handle));
+
+ return handle;
+}
+
+mozilla::TemporaryRef<LibHandle>
+ElfLoader::GetHandleByPtr(void *addr)
+{
+ /* Scan the list of handles we already have for a match */
+ for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it) {
+ if ((*it)->Contains(addr))
+ return *it;
+ }
+ return NULL;
+}
+
+void
+ElfLoader::Register(LibHandle *handle)
+{
+ handles.push_back(handle);
+ if (dbg && !handle->IsSystemElf())
+ dbg->Add(static_cast<CustomElf *>(handle));
+}
+
+void
+ElfLoader::Forget(LibHandle *handle)
+{
+ LibHandleList::iterator it = std::find(handles.begin(), handles.end(), handle);
+ if (it != handles.end()) {
+ debug("ElfLoader::Forget(%p [\"%s\"])", reinterpret_cast<void *>(handle),
+ handle->GetPath());
+ if (dbg && !handle->IsSystemElf())
+ dbg->Remove(static_cast<CustomElf *>(handle));
+ handles.erase(it);
+ } else {
+ debug("ElfLoader::Forget(%p [\"%s\"]): Handle not found",
+ reinterpret_cast<void *>(handle), handle->GetPath());
+ }
+}
+
+ElfLoader::~ElfLoader()
+{
+ LibHandleList list;
+ /* Build up a list of all library handles with direct (external) references.
+ * We actually skip system library handles because we want to keep at least
+ * some of these open. Most notably, Mozilla codebase keeps a few libgnome
+ * libraries deliberately open because of the mess that libORBit destruction
+ * is. dlclose()ing these libraries actually leads to problems. */
+ for (LibHandleList::reverse_iterator it = handles.rbegin();
+ it < handles.rend(); ++it) {
+ if ((*it)->DirectRefCount()) {
+ if ((*it)->IsSystemElf()) {
+ static_cast<SystemElf *>(*it)->Forget();
+ } else {
+ list.push_back(*it);
+ }
+ }
+ }
+ /* Force release all external references to the handles collected above */
+ for (LibHandleList::iterator it = list.begin(); it < list.end(); ++it) {
+ while ((*it)->ReleaseDirectRef()) { }
+ }
+ /* Remove the remaining system handles. */
+ if (handles.size()) {
+ list = handles;
+ for (LibHandleList::reverse_iterator it = list.rbegin();
+ it < list.rend(); ++it) {
+ if ((*it)->IsSystemElf()) {
+ debug("ElfLoader::~ElfLoader(): Remaining handle for \"%s\" "
+ "[%d direct refs, %d refs total]", (*it)->GetPath(),
+ (*it)->DirectRefCount(), (*it)->refCount());
+ } else {
+ debug("ElfLoader::~ElfLoader(): Unexpected remaining handle for \"%s\" "
+ "[%d direct refs, %d refs total]", (*it)->GetPath(),
+ (*it)->DirectRefCount(), (*it)->refCount());
+ /* Not removing, since it could have references to other libraries,
+ * destroying them as a side effect, and possibly leaving dangling
+ * pointers in the handle list we're scanning */
+ }
+ }
+ }
+}
+
+void
+ElfLoader::stats(const char *when)
+{
+ for (LibHandleList::iterator it = Singleton.handles.begin();
+ it < Singleton.handles.end(); ++it)
+ if (!(*it)->IsSystemElf())
+ static_cast<CustomElf *>(*it)->stats(when);
+}
+
+#ifdef __ARM_EABI__
+int
+ElfLoader::__wrap_aeabi_atexit(void *that, ElfLoader::Destructor destructor,
+ void *dso_handle)
+{
+ Singleton.destructors.push_back(
+ DestructorCaller(destructor, that, dso_handle));
+ return 0;
+}
+#else
+int
+ElfLoader::__wrap_cxa_atexit(ElfLoader::Destructor destructor, void *that,
+ void *dso_handle)
+{
+ Singleton.destructors.push_back(
+ DestructorCaller(destructor, that, dso_handle));
+ return 0;
+}
+#endif
+
+void
+ElfLoader::__wrap_cxa_finalize(void *dso_handle)
+{
+ /* Call all destructors for the given DSO handle in reverse order they were
+ * registered. */
+ std::vector<DestructorCaller>::reverse_iterator it;
+ for (it = Singleton.destructors.rbegin();
+ it < Singleton.destructors.rend(); ++it) {
+ if (it->IsForHandle(dso_handle)) {
+ it->Call();
+ }
+ }
+}
+
+void
+ElfLoader::DestructorCaller::Call()
+{
+ if (destructor) {
+ debug("ElfLoader::DestructorCaller::Call(%p, %p, %p)",
+ FunctionPtr(destructor), object, dso_handle);
+ destructor(object);
+ destructor = NULL;
+ }
+}
+
+void
+ElfLoader::InitDebugger()
+{
+ /* Find ELF auxiliary vectors.
+ *
+ * The kernel stores the following data on the stack when starting a
+ * program:
+ * argc
+ * argv[0] (pointer into argv strings defined below)
+ * argv[1] (likewise)
+ * ...
+ * argv[argc - 1] (likewise)
+ * NULL
+ * envp[0] (pointer into environment strings defined below)
+ * envp[1] (likewise)
+ * ...
+ * envp[n] (likewise)
+ * NULL
+ * auxv[0] (first ELF auxiliary vector)
+ * auxv[1] (second ELF auxiliary vector)
+ * ...
+ * auxv[p] (last ELF auxiliary vector)
+ * (AT_NULL, NULL)
+ * padding
+ * argv strings, separated with '\0'
+ * environment strings, separated with '\0'
+ * NULL
+ *
+ * What we are after are the auxv values defined by the following struct.
+ */
+ struct AuxVector {
+ Elf::Addr type;
+ Elf::Addr value;
+ };
+
+ /* Pointer to the environment variables list */
+ extern char **environ;
+
+ /* The environment may have changed since the program started, in which
+ * case the environ variables list isn't the list the kernel put on stack
+ * anymore. But in this new list, variables that didn't change still point
+ * to the strings the kernel put on stack. It is quite unlikely that two
+ * modified environment variables point to two consecutive strings in memory,
+ * so we assume that if two consecutive environment variables point to two
+ * consecutive strings, we found strings the kernel put on stack. */
+ char **env;
+ for (env = environ; *env; env++)
+ if (*env + strlen(*env) + 1 == env[1])
+ break;
+ if (!*env)
+ return;
+
+ /* Next, we scan the stack backwards to find a pointer to one of those
+ * strings we found above, which will give us the location of the original
+ * envp list. As we are looking for pointers, we need to look at 32-bits or
+ * 64-bit aligned values, depending on the architecture. */
+ char **scan = reinterpret_cast<char **>(
+ reinterpret_cast<uintptr_t>(*env) & ~(sizeof(void *) - 1));
+ while (*env != *scan)
+ scan--;
+
+ /* Finally, scan forward to find the last environment variable pointer and
+ * thus the first auxiliary vector. */
+ while (*scan++);
+ AuxVector *auxv = reinterpret_cast<AuxVector *>(scan);
+
+ /* The two values of interest in the auxiliary vectors are AT_PHDR and
+ * AT_PHNUM, which give us the location and size of the ELF program
+ * headers. */
+ Array<Elf::Phdr> phdrs;
+ char *base = NULL;
+ while (auxv->type) {
+ if (auxv->type == AT_PHDR) {
+ phdrs.Init(reinterpret_cast<Elf::Phdr*>(auxv->value));
+ /* Assume the base address is the first byte of the same page */
+ base = reinterpret_cast<char *>(auxv->value & PAGE_MASK);
+ }
+ if (auxv->type == AT_PHNUM)
+ phdrs.Init(auxv->value);
+ auxv++;
+ }
+
+ if (!phdrs) {
+ debug("Couldn't find program headers");
+ return;
+ }
+
+ /* In some cases, the address for the program headers we get from the
+ * auxiliary vectors is not mapped, because of the PT_LOAD segments
+ * definitions in the program executable. Trying to map anonymous memory
+ * with a hint giving the base address will return a different address
+ * if something is mapped there, and the base address otherwise. */
+ MappedPtr mem(mmap(base, PAGE_SIZE, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0), PAGE_SIZE);
+ if (mem == base) {
+ /* If program headers aren't mapped, try to map them */
+ int fd = open("/proc/self/exe", O_RDONLY);
+ if (fd == -1) {
+ debug("Failed to open /proc/self/exe");
+ return;
+ }
+ mem.Assign(mmap(base, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0), PAGE_SIZE);
+ /* If we don't manage to map at the right address, just give up. */
+ if (mem != base) {
+ debug("Couldn't read program headers");
+ return;
+ }
+ }
+ /* Sanity check: the first bytes at the base address should be an ELF
+ * header. */
+ if (!Elf::Ehdr::validate(base)) {
+ debug("Couldn't find program base");
+ return;
+ }
+
+ /* Search for the program PT_DYNAMIC segment */
+ Array<Elf::Dyn> dyns;
+ for (Array<Elf::Phdr>::iterator phdr = phdrs.begin(); phdr < phdrs.end();
+ ++phdr) {
+ /* While the program headers are expected within the first mapped page of
+ * the program executable, the executable PT_LOADs may actually make them
+ * loaded at an address that is not the wanted base address of the
+ * library. We thus need to adjust the base address, compensating for the
+ * virtual address of the PT_LOAD segment corresponding to offset 0. */
+ if (phdr->p_type == PT_LOAD && phdr->p_offset == 0)
+ base -= phdr->p_vaddr;
+ if (phdr->p_type == PT_DYNAMIC)
+ dyns.Init(base + phdr->p_vaddr, phdr->p_filesz);
+ }
+ if (!dyns) {
+ debug("Failed to find PT_DYNAMIC section in program");
+ return;
+ }
+
+ /* Search for the DT_DEBUG information */
+ for (Array<Elf::Dyn>::iterator dyn = dyns.begin(); dyn < dyns.end(); ++dyn) {
+ if (dyn->d_tag == DT_DEBUG) {
+ dbg = reinterpret_cast<r_debug *>(dyn->d_un.d_ptr);
+ break;
+ }
+ }
+ debug("DT_DEBUG points at %p", dbg);
+}
+
+/**
+ * The system linker maintains a doubly linked list of the libraries it loads
+ * for use by the debugger. Unfortunately, it also uses the list pointers
+ * in a lot of operations and adding our data in the list is likely to
+ * trigger crashes when the linker tries to use data we don't provide or
+ * that fall outside the amount of data we allocated. Fortunately, the linker only
+ * traverses the list forward and accesses the head of the list from a
+ * private pointer instead of using the value in the r_debug structure.
+ * This means we can safely add members at the beginning of the list.
+ * Unfortunately, gdb checks the coherency of l_prev values, so we have
+ * to adjust the l_prev value for the first element the system linker
+ * knows about. Fortunately, it doesn't use l_prev, and the first element
+ * is not ever going to be released before our elements, since it is the
+ * program executable, so the system linker should not be changing
+ * r_debug::r_map.
+ */
+void
+ElfLoader::r_debug::Add(ElfLoader::link_map *map)
+{
+ if (!r_brk)
+ return;
+ r_state = RT_ADD;
+ r_brk();
+ map->l_prev = NULL;
+ map->l_next = r_map;
+ r_map->l_prev = map;
+ r_map = map;
+ r_state = RT_CONSISTENT;
+ r_brk();
+}
+
+void
+ElfLoader::r_debug::Remove(ElfLoader::link_map *map)
+{
+ if (!r_brk)
+ return;
+ r_state = RT_DELETE;
+ r_brk();
+ if (r_map == map)
+ r_map = map->l_next;
+ else
+ map->l_prev->l_next = map->l_next;
+ map->l_next->l_prev = map->l_prev;
+ r_state = RT_CONSISTENT;
+ r_brk();
+}
+
+SEGVHandler::SEGVHandler()
+{
+ /* Setup an alternative stack if the already existing one is not big
+ * enough, or if there is none. */
+ if (sigaltstack(NULL, &oldStack) == -1 || !oldStack.ss_sp ||
+ oldStack.ss_size < stackSize) {
+ stackPtr.Assign(mmap(NULL, stackSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0), stackSize);
+ stack_t stack;
+ stack.ss_sp = stackPtr;
+ stack.ss_size = stackSize;
+ stack.ss_flags = 0;
+ sigaltstack(&stack, NULL);
+ }
+ /* Register our own handler, and store the already registered one in
+ * SEGVHandler's struct sigaction member */
+ struct sigaction action;
+ action.sa_sigaction = &SEGVHandler::handler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ action.sa_restorer = NULL;
+ sigaction(SIGSEGV, &action, &this->action);
+}
+
+SEGVHandler::~SEGVHandler()
+{
+ /* Restore alternative stack for signals */
+ sigaltstack(&oldStack, NULL);
+ /* Restore original signal handler */
+ sigaction(SIGSEGV, &this->action, NULL);
+}
+
+/* TODO: "properly" handle signal masks and flags */
+void SEGVHandler::handler(int signum, siginfo_t *info, void *context)
+{
+ //ASSERT(signum == SIGSEGV);
+ debug("Caught segmentation fault @%p", info->si_addr);
+
+ /* Check whether we segfaulted in the address space of a CustomElf. We're
+ * only expecting that to happen as an access error. */
+ if (info->si_code == SEGV_ACCERR) {
+ /* We may segfault when running destructors in CustomElf::~CustomElf, so we
+ * can't hold a RefPtr on the handle. */
+ LibHandle *handle = ElfLoader::Singleton.GetHandleByPtr(info->si_addr).drop();
+ if (handle && !handle->IsSystemElf()) {
+ debug("Within the address space of a CustomElf");
+ CustomElf *elf = static_cast<CustomElf *>(static_cast<LibHandle *>(handle));
+ if (elf->mappable->ensure(info->si_addr))
+ return;
+ }
+ }
+
+ /* Redispatch to the registered handler */
+ SEGVHandler &that = ElfLoader::Singleton;
+ if (that.action.sa_flags & SA_SIGINFO) {
+ debug("Redispatching to registered handler @%p", that.action.sa_sigaction);
+ that.action.sa_sigaction(signum, info, context);
+ } else if (that.action.sa_handler == SIG_DFL) {
+ debug("Redispatching to default handler");
+ /* Reset the handler to the default one, and trigger it. */
+ sigaction(signum, &that.action, NULL);
+ raise(signum);
+ } else if (that.action.sa_handler != SIG_IGN) {
+ debug("Redispatching to registered handler @%p", that.action.sa_handler);
+ that.action.sa_handler(signum);
+ } else {
+ debug("Ignoring");
+ }
+}
+
+sighandler_t
+__wrap_signal(int signum, sighandler_t handler)
+{
+ /* Use system signal() function for all but SIGSEGV signals. */
+ if (signum != SIGSEGV)
+ return signal(signum, handler);
+
+ SEGVHandler &that = ElfLoader::Singleton;
+ union {
+ sighandler_t signal;
+ void (*sigaction)(int, siginfo_t *, void *);
+ } oldHandler;
+
+ /* Keep the previous handler to return its value */
+ if (that.action.sa_flags & SA_SIGINFO) {
+ oldHandler.sigaction = that.action.sa_sigaction;
+ } else {
+ oldHandler.signal = that.action.sa_handler;
+ }
+ /* Set the new handler */
+ that.action.sa_handler = handler;
+ that.action.sa_flags = 0;
+
+ return oldHandler.signal;
+}
+
+int
+__wrap_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact)
+{
+ /* Use system sigaction() function for all but SIGSEGV signals. */
+ if (signum != SIGSEGV)
+ return sigaction(signum, act, oldact);
+
+ SEGVHandler &that = ElfLoader::Singleton;
+ if (oldact)
+ *oldact = that.action;
+ if (act)
+ that.action = *act;
+ return 0;
+}
diff --git a/sal/android/faulty.lib/linker/ElfLoader.h b/sal/android/faulty.lib/linker/ElfLoader.h
new file mode 100644
index 000000000000..2c0a6fa07650
--- /dev/null
+++ b/sal/android/faulty.lib/linker/ElfLoader.h
@@ -0,0 +1,468 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ElfLoader_h
+#define ElfLoader_h
+
+#include <vector>
+#include <dlfcn.h>
+#include <signal.h>
+#include "mozilla/RefPtr.h"
+#include "Zip.h"
+#include "Elfxx.h"
+
+/**
+ * dlfcn.h replacement functions
+ */
+extern "C" {
+ void *__wrap_dlopen(const char *path, int flags);
+ const char *__wrap_dlerror(void);
+ void *__wrap_dlsym(void *handle, const char *symbol);
+ int __wrap_dlclose(void *handle);
+
+#ifndef HAVE_DLADDR
+ typedef struct {
+ const char *dli_fname;
+ void *dli_fbase;
+ const char *dli_sname;
+ void *dli_saddr;
+ } Dl_info;
+#endif
+ int __wrap_dladdr(void *addr, Dl_info *info);
+
+ sighandler_t __wrap_signal(int signum, sighandler_t handler);
+ int __wrap_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact);
+
+ struct dl_phdr_info {
+ Elf::Addr dlpi_addr;
+ const char *dlpi_name;
+ const Elf::Phdr *dlpi_phdr;
+ Elf::Half dlpi_phnum;
+ };
+
+ typedef int (*dl_phdr_cb)(struct dl_phdr_info *, size_t, void *);
+ int __wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data);
+}
+
+/**
+ * Abstract class for loaded libraries. Libraries may be loaded through the
+ * system linker or this linker, both cases will be derived from this class.
+ */
+class LibHandle: public mozilla::RefCounted<LibHandle>
+{
+public:
+ /**
+ * Constructor. Takes the path of the loaded library and will store a copy
+ * of the leaf name.
+ */
+ LibHandle(const char *path)
+ : directRefCnt(0), path(path ? strdup(path) : NULL) { }
+
+ /**
+ * Destructor.
+ */
+ virtual ~LibHandle();
+
+ /**
+ * Returns the pointer to the address to which the given symbol resolves
+ * inside the library. It is not supposed to resolve the symbol in other
+ * libraries, although in practice, it will for system libraries.
+ */
+ virtual void *GetSymbolPtr(const char *symbol) const = 0;
+
+ /**
+ * Returns whether the given address is part of the virtual address space
+ * covered by the loaded library.
+ */
+ virtual bool Contains(void *addr) const = 0;
+
+ /**
+ * Returns the file name of the library without the containing directory.
+ */
+ const char *GetName() const;
+
+ /**
+ * Returns the full path of the library, when available. Otherwise, returns
+ * the file name.
+ */
+ const char *GetPath() const
+ {
+ return path;
+ }
+
+ /**
+ * Library handles can be referenced from other library handles or
+ * externally (when dlopen()ing using this linker). We need to be
+ * able to distinguish between the two kind of referencing for better
+ * bookkeeping.
+ */
+ void AddDirectRef()
+ {
+ ++directRefCnt;
+ mozilla::RefCounted<LibHandle>::AddRef();
+ }
+
+ /**
+ * Releases a direct reference, and returns whether there are any direct
+ * references left.
+ */
+ bool ReleaseDirectRef()
+ {
+ bool ret = false;
+ if (directRefCnt) {
+ MOZ_ASSERT(directRefCnt <= mozilla::RefCounted<LibHandle>::refCount());
+ if (--directRefCnt)
+ ret = true;
+ mozilla::RefCounted<LibHandle>::Release();
+ }
+ return ret;
+ }
+
+ /**
+ * Returns the number of direct references
+ */
+ int DirectRefCount()
+ {
+ return directRefCnt;
+ }
+
+protected:
+ /**
+ * Returns whether the handle is a SystemElf or not. (short of a better way
+ * to do this without RTTI)
+ */
+ friend class ElfLoader;
+ friend class CustomElf;
+ friend class SEGVHandler;
+ virtual bool IsSystemElf() const { return false; }
+
+private:
+ int directRefCnt;
+ char *path;
+};
+
+/**
+ * Class handling libraries loaded by the system linker
+ */
+class SystemElf: public LibHandle
+{
+public:
+ /**
+ * Returns a new SystemElf for the given path. The given flags are passed
+ * to dlopen().
+ */
+ static mozilla::TemporaryRef<LibHandle> Load(const char *path, int flags);
+
+ /**
+ * Inherited from LibHandle
+ */
+ virtual ~SystemElf();
+ virtual void *GetSymbolPtr(const char *symbol) const;
+ virtual bool Contains(void *addr) const { return false; /* UNIMPLEMENTED */ }
+
+protected:
+ /**
+ * Returns whether the handle is a SystemElf or not. (short of a better way
+ * to do this without RTTI)
+ */
+ friend class ElfLoader;
+ virtual bool IsSystemElf() const { return true; }
+
+ /**
+ * Remove the reference to the system linker handle. This avoids dlclose()
+ * being called when the instance is destroyed.
+ */
+ void Forget()
+ {
+ dlhandle = NULL;
+ }
+
+private:
+ /**
+ * Private constructor
+ */
+ SystemElf(const char *path, void *handle)
+ : LibHandle(path), dlhandle(handle) { }
+
+ /* Handle as returned by system dlopen() */
+ void *dlhandle;
+};
+
+/**
+ * The ElfLoader registers its own SIGSEGV handler to handle segmentation
+ * faults within the address space of the loaded libraries. It however
+ * allows a handler to be set for faults in other places, and redispatches
+ * to the handler set through signal() or sigaction(). We assume no system
+ * library loaded with system dlopen is going to call signal or sigaction
+ * for SIGSEGV.
+ */
+class SEGVHandler
+{
+protected:
+ SEGVHandler();
+ ~SEGVHandler();
+
+private:
+ friend sighandler_t __wrap_signal(int signum, sighandler_t handler);
+ friend int __wrap_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact);
+
+ /**
+ * SIGSEGV handler registered with __wrap_signal or __wrap_sigaction.
+ */
+ struct sigaction action;
+
+ /**
+ * ElfLoader SIGSEGV handler.
+ */
+ static void handler(int signum, siginfo_t *info, void *context);
+
+ /**
+ * Size of the alternative stack. The printf family requires more than 8KB
+ * of stack, and our signal handler may print a few things.
+ */
+ static const size_t stackSize = 12 * 1024;
+
+ /**
+ * Alternative stack information used before initialization.
+ */
+ stack_t oldStack;
+
+ /**
+ * Pointer to an alternative stack for signals. Only set if oldStack is
+ * not set or not big enough.
+ */
+ MappedPtr stackPtr;
+};
+
+/**
+ * Elf Loader class in charge of loading and bookkeeping libraries.
+ */
+class ElfLoader: public SEGVHandler
+{
+public:
+ /**
+ * The Elf Loader instance
+ */
+ static ElfLoader Singleton;
+
+ /**
+ * Loads the given library with the given flags. Equivalent to dlopen()
+ * The extra "parent" argument optionally gives the handle of the library
+ * requesting the given library to be loaded. The loader may look in the
+ * directory containing that parent library for the library to load.
+ */
+ mozilla::TemporaryRef<LibHandle> Load(const char *path, int flags,
+ LibHandle *parent = NULL);
+
+ /**
+ * Returns the handle of the library containing the given address in
+ * its virtual address space, i.e. the library handle for which
+ * LibHandle::Contains returns true. Its purpose is to make it possible to
+ * implement dladdr().
+ */
+ mozilla::TemporaryRef<LibHandle> GetHandleByPtr(void *addr);
+
+protected:
+ /**
+ * Registers the given handle. This method is meant to be called by
+ * LibHandle subclass creators.
+ */
+ void Register(LibHandle *handle);
+
+ /**
+ * Forget about the given handle. This method is meant to be called by
+ * LibHandle subclass destructors.
+ */
+ void Forget(LibHandle *handle);
+
+ /* Last error. Used for dlerror() */
+ friend class SystemElf;
+ friend const char *__wrap_dlerror(void);
+ friend void *__wrap_dlsym(void *handle, const char *symbol);
+ friend int __wrap_dlclose(void *handle);
+ const char *lastError;
+
+private:
+ ElfLoader() { InitDebugger(); }
+ ~ElfLoader();
+
+ /* Bookkeeping */
+ typedef std::vector<LibHandle *> LibHandleList;
+ LibHandleList handles;
+
+protected:
+ friend class CustomElf;
+ /**
+ * Show some stats about Mappables in CustomElfs. The when argument is to
+ * be used by the caller to give an identifier of the when the stats call
+ * is made.
+ */
+ static void stats(const char *when);
+
+ /* Definition of static destructors as to be used for C++ ABI compatibility */
+ typedef void (*Destructor)(void *object);
+
+ /**
+ * C++ ABI makes static initializers register destructors through a specific
+ * atexit interface. On glibc/linux systems, the dso_handle is a pointer
+ * within a given library. On bionic/android systems, it is an undefined
+ * symbol. Making sense of the value is not really important, and all that
+ * is really important is that it is different for each loaded library, so
+ * that they can be discriminated when shutting down. For convenience, on
+ * systems where the dso handle is a symbol, that symbol is resolved to
+ * point at corresponding CustomElf.
+ *
+ * Destructors are registered with __*_atexit with an associated object to
+ * be passed as argument when it is called.
+ *
+ * When __cxa_finalize is called, destructors registered for the given
+ * DSO handle are called in the reverse order they were registered.
+ */
+#ifdef __ARM_EABI__
+ static int __wrap_aeabi_atexit(void *that, Destructor destructor,
+ void *dso_handle);
+#else
+ static int __wrap_cxa_atexit(Destructor destructor, void *that,
+ void *dso_handle);
+#endif
+
+ static void __wrap_cxa_finalize(void *dso_handle);
+
+ /**
+ * Registered destructor. Keeps track of the destructor function pointer,
+ * associated object to call it with, and DSO handle.
+ */
+ class DestructorCaller {
+ public:
+ DestructorCaller(Destructor destructor, void *object, void *dso_handle)
+ : destructor(destructor), object(object), dso_handle(dso_handle) { }
+
+ /**
+ * Call the destructor function with the associated object.
+ * Call only once, see CustomElf::~CustomElf.
+ */
+ void Call();
+
+ /**
+ * Returns whether the destructor is associated to the given DSO handle
+ */
+ bool IsForHandle(void *handle) const
+ {
+ return handle == dso_handle;
+ }
+
+ private:
+ Destructor destructor;
+ void *object;
+ void *dso_handle;
+ };
+
+private:
+ /* Keep track of all registered destructors */
+ std::vector<DestructorCaller> destructors;
+
+ /* Keep track of Zips used for library loading */
+ ZipCollection zips;
+
+ /* Forward declaration, see further below */
+ class r_debug;
+public:
+ /* Loaded object descriptor for the debugger interface below */
+ struct link_map {
+ /* Base address of the loaded object. */
+ const void *l_addr;
+ /* File name */
+ const char *l_name;
+ /* Address of the PT_DYNAMIC segment. */
+ const void *l_ld;
+
+ private:
+ friend class ElfLoader::r_debug;
+ /* Double linked list of loaded objects. */
+ link_map *l_next, *l_prev;
+ };
+
+private:
+ /* Data structure used by the linker to give details about shared objects it
+ * loaded to debuggers. This is normally defined in link.h, but Android
+ * headers lack this file. This also gives the opportunity to make it C++. */
+ class r_debug {
+ public:
+ /* Make the debugger aware of a new loaded object */
+ void Add(link_map *map);
+
+ /* Make the debugger aware of the unloading of an object */
+ void Remove(link_map *map);
+
+ /* Iterates over all link_maps */
+ class iterator
+ {
+ public:
+ const link_map *operator ->() const
+ {
+ return item;
+ }
+
+ const link_map &operator ++()
+ {
+ item = item->l_next;
+ return *item;
+ }
+
+ bool operator<(const iterator &other) const
+ {
+ if (other.item == NULL)
+ return item ? true : false;
+ MOZ_NOT_REACHED("r_debug::iterator::operator< called with something else than r_debug::end()");
+ }
+ protected:
+ friend class r_debug;
+ iterator(const link_map *item): item(item) { }
+
+ private:
+ const link_map *item;
+ };
+
+ iterator begin() const
+ {
+ return iterator(r_map);
+ }
+
+ iterator end() const
+ {
+ return iterator(NULL);
+ }
+
+ private:
+ /* Version number of the protocol. */
+ int r_version;
+
+ /* Head of the linked list of loaded objects. */
+ struct link_map *r_map;
+
+ /* Function to be called when updates to the linked list of loaded objects
+ * are going to occur. The function is to be called before and after
+ * changes. */
+ void (*r_brk)(void);
+
+ /* Indicates to the debugger what state the linked list of loaded objects
+ * is in when the function above is called. */
+ enum {
+ RT_CONSISTENT, /* Changes are complete */
+ RT_ADD, /* Beginning to add a new object */
+ RT_DELETE /* Beginning to remove an object */
+ } r_state;
+ };
+ friend int __wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data);
+ r_debug *dbg;
+
+ /**
+ * Initializes the pointer to the debugger data structure.
+ */
+ void InitDebugger();
+};
+
+#endif /* ElfLoader_h */
diff --git a/sal/android/faulty.lib/linker/Elfxx.h b/sal/android/faulty.lib/linker/Elfxx.h
new file mode 100644
index 000000000000..cd871561bdaf
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Elfxx.h
@@ -0,0 +1,236 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Elfxx_h
+#define Elfxx_h
+
+/**
+ * Android system headers have two different elf.h file. The one under linux/
+ * is the most complete.
+ */
+#ifdef ANDROID
+#include <linux/elf.h>
+#else
+#include <elf.h>
+#endif
+#include <endian.h>
+
+/**
+ * Generic ELF macros for the target system
+ */
+#ifdef HAVE_64BIT_OS
+#define Elf_(type) Elf64_ ## type
+#define ELFCLASS ELFCLASS64
+#define ELF_R_TYPE ELF64_R_TYPE
+#define ELF_R_SYM ELF64_R_SYM
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND ELF64_ST_BIND
+#endif
+#define PRIxAddr "lx"
+#else
+#define Elf_(type) Elf32_ ## type
+#define ELFCLASS ELFCLASS32
+#define ELF_R_TYPE ELF32_R_TYPE
+#define ELF_R_SYM ELF32_R_SYM
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND ELF32_ST_BIND
+#endif
+#define PRIxAddr "x"
+#endif
+
+#ifndef __BYTE_ORDER
+#error Cannot find endianness
+#endif
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define ELFDATA ELFDATA2LSB
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define ELFDATA ELFDATA2MSB
+#endif
+
+#ifdef __linux__
+#define ELFOSABI ELFOSABI_LINUX
+#ifdef EI_ABIVERSION
+#define ELFABIVERSION 0
+#endif
+#else
+#error Unknown ELF OSABI
+#endif
+
+#if defined(__i386__)
+#define ELFMACHINE EM_386
+
+// Doing this way probably doesn't scale to other architectures
+#define R_ABS R_386_32
+#define R_GLOB_DAT R_386_GLOB_DAT
+#define R_JMP_SLOT R_386_JMP_SLOT
+#define R_RELATIVE R_386_RELATIVE
+#define RELOC(n) DT_REL ## n
+#define UNSUPPORTED_RELOC(n) DT_RELA ## n
+#define STR_RELOC(n) "DT_REL" # n
+#define Reloc Rel
+
+#elif defined(__x86_64__)
+#define ELFMACHINE EM_X86_64
+
+#define R_ABS R_X86_64_64
+#define R_GLOB_DAT R_X86_64_GLOB_DAT
+#define R_JMP_SLOT R_X86_64_JUMP_SLOT
+#define R_RELATIVE R_X86_64_RELATIVE
+#define RELOC(n) DT_RELA ## n
+#define UNSUPPORTED_RELOC(n) DT_REL ## n
+#define STR_RELOC(n) "DT_RELA" # n
+#define Reloc Rela
+
+#elif defined(__arm__)
+#define ELFMACHINE EM_ARM
+
+#ifndef R_ARM_ABS32
+#define R_ARM_ABS32 2
+#endif
+#ifndef R_ARM_GLOB_DAT
+#define R_ARM_GLOB_DAT 21
+#endif
+#ifndef R_ARM_JUMP_SLOT
+#define R_ARM_JUMP_SLOT 22
+#endif
+#ifndef R_ARM_RELATIVE
+#define R_ARM_RELATIVE 23
+#endif
+
+#define R_ABS R_ARM_ABS32
+#define R_GLOB_DAT R_ARM_GLOB_DAT
+#define R_JMP_SLOT R_ARM_JUMP_SLOT
+#define R_RELATIVE R_ARM_RELATIVE
+#define RELOC(n) DT_REL ## n
+#define UNSUPPORTED_RELOC(n) DT_RELA ## n
+#define STR_RELOC(n) "DT_REL" # n
+#define Reloc Rel
+
+#else
+#error Unknown ELF machine type
+#endif
+
+/**
+ * Android system headers don't have all definitions
+ */
+#ifndef STN_UNDEF
+#define STN_UNDEF 0
+#endif
+#ifndef DT_INIT_ARRAY
+#define DT_INIT_ARRAY 25
+#endif
+#ifndef DT_FINI_ARRAY
+#define DT_FINI_ARRAY 26
+#endif
+#ifndef DT_INIT_ARRAYSZ
+#define DT_INIT_ARRAYSZ 27
+#endif
+#ifndef DT_FINI_ARRAYSZ
+#define DT_FINI_ARRAYSZ 28
+#endif
+#ifndef DT_RELACOUNT
+#define DT_RELACOUNT 0x6ffffff9
+#endif
+#ifndef DT_RELCOUNT
+#define DT_RELCOUNT 0x6ffffffa
+#endif
+#ifndef DT_VERSYM
+#define DT_VERSYM 0x6ffffff0
+#endif
+#ifndef DT_VERDEF
+#define DT_VERDEF 0x6ffffffc
+#endif
+#ifndef DT_VERDEFNUM
+#define DT_VERDEFNUM 0x6ffffffd
+#endif
+#ifndef DT_VERNEED
+#define DT_VERNEED 0x6ffffffe
+#endif
+#ifndef DT_VERNEEDNUM
+#define DT_VERNEEDNUM 0x6fffffff
+#endif
+#ifndef DT_FLAGS
+#define DT_FLAGS 30
+#endif
+#ifndef DF_SYMBOLIC
+#define DF_SYMBOLIC 0x00000002
+#endif
+#ifndef DF_TEXTREL
+#define DF_TEXTREL 0x00000004
+#endif
+
+namespace Elf {
+
+/**
+ * Define a few basic Elf Types
+ */
+typedef Elf_(Phdr) Phdr;
+typedef Elf_(Dyn) Dyn;
+typedef Elf_(Sym) Sym;
+typedef Elf_(Addr) Addr;
+typedef Elf_(Word) Word;
+typedef Elf_(Half) Half;
+
+/**
+ * Helper class around the standard Elf header struct
+ */
+struct Ehdr: public Elf_(Ehdr)
+{
+  /**
+   * Equivalent to reinterpret_cast<const Ehdr *>(buf), but additionally
+   * checking that this is indeed an Elf header and that the Elf type
+   * corresponds to that of the system
+   */
+  static const Ehdr *validate(const void *buf);
+};
+
+/**
+ * Elf String table
+ */
+class Strtab: public UnsizedArray<const char>
+{
+public:
+  /**
+   * Returns the string at the given index in the table
+   */
+  const char *GetStringAt(off_t index) const
+  {
+    return &UnsizedArray<const char>::operator[](index);
+  }
+};
+
+/**
+ * Helper class around Elf relocation.
+ */
+struct Rel: public Elf_(Rel)
+{
+  /**
+   * Returns the addend for the relocation, which is the value stored
+   * at r_offset.
+   */
+  Addr GetAddend(void *base) const
+  {
+    return *(reinterpret_cast<const Addr *>(
+        reinterpret_cast<const char *>(base) + r_offset));
+  }
+};
+
+/**
+ * Helper class around Elf relocation with addend.
+ */
+struct Rela: public Elf_(Rela)
+{
+  /**
+   * Returns the addend for the relocation. Unlike Rel::GetAddend, the
+   * addend is stored in the relocation entry itself, so the base argument
+   * is intentionally unused (kept for a uniform interface with Rel).
+   */
+  Addr GetAddend(void *base) const
+  {
+    return r_addend;
+  }
+};
+
+} /* namespace Elf */
+
+#endif /* Elfxx_h */
diff --git a/sal/android/faulty.lib/linker/Logging.h b/sal/android/faulty.lib/linker/Logging.h
new file mode 100644
index 000000000000..ed0f51a4a034
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Logging.h
@@ -0,0 +1,22 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Logging_h
+#define Logging_h
+
+#ifdef ANDROID
+#include <android/log.h>
+#define log(...) __android_log_print(ANDROID_LOG_ERROR, "GeckoLinker", __VA_ARGS__)
+#else
+#include <cstdio>
+#define log(format, ...) fprintf(stderr, format "\n", ##__VA_ARGS__)
+#endif
+
+#ifdef MOZ_DEBUG_LINKER
+#define debug log
+#else
+#define debug(...)
+#endif
+
+#endif /* Logging_h */
diff --git a/sal/android/faulty.lib/linker/Mappable.cxx b/sal/android/faulty.lib/linker/Mappable.cxx
new file mode 100644
index 000000000000..b57002542005
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Mappable.cxx
@@ -0,0 +1,519 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <cstring>
+#include <cstdlib>
+#include <cstdio>
+#include "Mappable.h"
+#ifdef ANDROID
+#include <linux/ashmem.h>
+#endif
+#include <sys/stat.h>
+#include "ElfLoader.h"
+#include "SeekableZStream.h"
+#include "Logging.h"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (~ (PAGE_SIZE - 1))
+#endif
+
+/* Open the file at the given path and wrap its descriptor in a
+ * MappableFile. Returns NULL when the file cannot be opened. */
+Mappable *
+MappableFile::Create(const char *path)
+{
+  int fd = open(path, O_RDONLY);
+  if (fd == -1)
+    return NULL;
+  return new MappableFile(fd);
+}
+
+/* mmap the underlying file. Mappings are always MAP_PRIVATE per the
+ * Mappable contract; when the requested protection includes write bits,
+ * the tail of the last page (past length) is zeroed out so callers see
+ * nulled memory up to the page boundary. Returns MAP_FAILED on error. */
+void *
+MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
+                   off_t offset)
+{
+  MOZ_ASSERT(fd != -1);
+  MOZ_ASSERT(!(flags & MAP_SHARED));
+  flags |= MAP_PRIVATE;
+
+  void *mapped = ::mmap(const_cast<void *>(addr), length, prot, flags,
+                        fd, offset);
+  if (mapped == MAP_FAILED)
+    return mapped;
+
+  /* Fill the remainder of the last page with zeroes when the requested
+   * protection has write bits. (The previous extra mapped != MAP_FAILED
+   * test was redundant: that case already returned above.) */
+  if ((prot & PROT_WRITE) && (length & (PAGE_SIZE - 1))) {
+    memset(reinterpret_cast<char *>(mapped) + length, 0,
+           PAGE_SIZE - (length & ~(PAGE_MASK)));
+  }
+  return mapped;
+}
+
+/* No further mmap will happen for this Mappable: drop the descriptor.
+ * Assigning -1 to the AutoCloseFD closes the previously held fd. */
+void
+MappableFile::finalize()
+{
+  /* Close file ; equivalent to close(fd.forget()) */
+  fd = -1;
+}
+
+/* Extract the given Zip stream into a file under $MOZ_LINKER_CACHE, reusing
+ * a previously extracted copy when it is newer than the Zip archive itself.
+ * Handles both raw DEFLATE streams and seekable-zstream (STORE) payloads.
+ * Returns NULL on any failure; temporary state (fd, extracted file) is
+ * cleaned up by the Auto* RAII wrappers on every early return. */
+Mappable *
+MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
+{
+  const char *cachePath = getenv("MOZ_LINKER_CACHE");
+  if (!cachePath || !*cachePath) {
+    log("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
+        "not extracting");
+    return NULL;
+  }
+  AutoDeleteArray<char> path = new char[strlen(cachePath) + strlen(name) + 2];
+  sprintf(path, "%s/%s", cachePath, name);
+  struct stat cacheStat;
+  if (stat(path, &cacheStat) == 0) {
+    struct stat zipStat;
+    /* NOTE(review): stat() return value is unchecked here; on failure
+     * zipStat is uninitialized and the mtime comparison below is
+     * meaningless — confirm the archive always exists at this point. */
+    stat(zip->GetName(), &zipStat);
+    if (cacheStat.st_mtime > zipStat.st_mtime) {
+      debug("Reusing %s", static_cast<char *>(path));
+      return MappableFile::Create(path);
+    }
+  }
+  debug("Extracting to %s", static_cast<char *>(path));
+  AutoCloseFD fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
+                        S_IRUSR | S_IWUSR);
+  if (fd == -1) {
+    log("Couldn't open %s to decompress library", path.get());
+    return NULL;
+  }
+  /* From here on, the extracted file is unlinked if we bail out. */
+  AutoUnlinkFile file = path.forget();
+  if (stream->GetType() == Zip::Stream::DEFLATE) {
+    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
+      log("Couldn't ftruncate %s to decompress library", file.get());
+      return NULL;
+    }
+    /* Map the temporary file for use as inflate buffer */
+    MappedPtr buffer(::mmap(NULL, stream->GetUncompressedSize(), PROT_WRITE,
+                            MAP_SHARED, fd, 0), stream->GetUncompressedSize());
+    if (buffer == MAP_FAILED) {
+      log("Couldn't map %s to decompress library", file.get());
+      return NULL;
+    }
+
+    z_stream zStream = stream->GetZStream(buffer);
+
+    /* Decompress */
+    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
+      log("inflateInit failed: %s", zStream.msg);
+      return NULL;
+    }
+    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
+      log("inflate failed: %s", zStream.msg);
+      return NULL;
+    }
+    if (inflateEnd(&zStream) != Z_OK) {
+      log("inflateEnd failed: %s", zStream.msg);
+      return NULL;
+    }
+    if (zStream.total_out != stream->GetUncompressedSize()) {
+      log("File not fully uncompressed! %ld / %d", zStream.total_out,
+          static_cast<unsigned int>(stream->GetUncompressedSize()));
+      return NULL;
+    }
+  } else if (stream->GetType() == Zip::Stream::STORE) {
+    /* STORE-d entries are expected to contain a seekable zstream. */
+    SeekableZStream zStream;
+    if (!zStream.Init(stream->GetBuffer())) {
+      log("Couldn't initialize SeekableZStream for %s", name);
+      return NULL;
+    }
+    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
+      log("Couldn't ftruncate %s to decompress library", file.get());
+      return NULL;
+    }
+    MappedPtr buffer(::mmap(NULL, zStream.GetUncompressedSize(), PROT_WRITE,
+                            MAP_SHARED, fd, 0), zStream.GetUncompressedSize());
+    if (buffer == MAP_FAILED) {
+      log("Couldn't map %s to decompress library", file.get());
+      return NULL;
+    }
+
+    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
+      log("%s: failed to decompress", name);
+      return NULL;
+    }
+  } else {
+    return NULL;
+  }
+
+  return new MappableExtractFile(fd.forget(), file.forget());
+}
+
+/* Destructor: by default the AutoUnlinkFile member removes the extracted
+ * file; in a forked child we deliberately forgo the unlink. */
+MappableExtractFile::~MappableExtractFile()
+{
+  /* When destroying from a forked process, we don't want the file to be
+   * removed, as the main process is still using the file. Although it
+   * doesn't really matter, it helps e.g. valgrind that the file is there.
+   * The string still needs to be delete[]d, though */
+  if (pid != getpid())
+    delete [] path.forget();
+}
+
+/**
+ * _MappableBuffer is a buffer which content can be mapped at different
+ * locations in the virtual address space.
+ * On Linux, uses a (deleted) temporary file on a tmpfs for sharable content.
+ * On Android, uses ashmem.
+ */
+class _MappableBuffer: public MappedPtr
+{
+public:
+  /**
+   * Returns a _MappableBuffer instance with the given name and the given
+   * length.
+   */
+  static _MappableBuffer *Create(const char *name, size_t length)
+  {
+    AutoCloseFD fd;
+#ifdef ANDROID
+    /* On Android, initialize an ashmem region with the given length */
+    fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
+    if (fd == -1)
+      return NULL;
+    char str[ASHMEM_NAME_LEN];
+    strlcpy(str, name, sizeof(str));
+    ioctl(fd, ASHMEM_SET_NAME, str);
+    if (ioctl(fd, ASHMEM_SET_SIZE, length))
+      return NULL;
+#else
+    /* On Linux, use /dev/shm as base directory for temporary files, assuming
+     * it's on tmpfs */
+    /* TODO: check that /dev/shm is tmpfs */
+    /* NOTE(review): sprintf into a fixed 256-byte buffer — a very long name
+     * would overflow; confirm callers only pass short library names. */
+    char str[256];
+    sprintf(str, "/dev/shm/%s.XXXXXX", name);
+    fd = mkstemp(str);
+    if (fd == -1)
+      return NULL;
+    /* Unlink immediately: the file lives only as long as the fd. */
+    unlink(str);
+    ftruncate(fd, length);
+#endif
+
+    void *buf = ::mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    if (buf != MAP_FAILED) {
+      debug("Decompression buffer of size %ld in "
+#ifdef ANDROID
+            "ashmem "
+#endif
+            "\"%s\", mapped @%p",
+            length, str, buf);
+      return new _MappableBuffer(fd.forget(), buf, length);
+    }
+    return NULL;
+  }
+
+  void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
+  {
+    MOZ_ASSERT(fd != -1);
+#ifdef ANDROID
+    /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
+     * there is content in the ashmem */
+    if (flags & MAP_PRIVATE) {
+      flags &= ~MAP_PRIVATE;
+      flags |= MAP_SHARED;
+    }
+#endif
+    return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
+  }
+
+private:
+  _MappableBuffer(int fd, void *buf, size_t length)
+  : MappedPtr(buf, length), fd(fd) { }
+
+  /* File descriptor for the temporary file or ashmem */
+  AutoCloseFD fd;
+};
+
+
+/* Create a MappableDeflate backed by a shared decompression buffer sized
+ * to the stream's uncompressed size. Returns NULL when the buffer cannot
+ * be created. */
+Mappable *
+MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
+{
+  MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
+  _MappableBuffer *buf = _MappableBuffer::Create(name, stream->GetUncompressedSize());
+  if (!buf)
+    return NULL;
+  return new MappableDeflate(buf, zip, stream);
+}
+
+/* Keep a reference on the Zip archive, take ownership of the decompression
+ * buffer, and initialize the zlib stream state from the Zip entry. */
+MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
+                                 Zip::Stream *stream)
+: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }
+
+MappableDeflate::~MappableDeflate() { }
+
+/* mmap over the shared decompression buffer, lazily inflating the deflate
+ * stream just far enough to cover [offset, offset + length). zStream state
+ * persists across calls, so decompression resumes where it left off.
+ * Returns MAP_FAILED on any zlib error. */
+void *
+MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
+{
+  MOZ_ASSERT(buffer);
+  MOZ_ASSERT(!(flags & MAP_SHARED));
+  flags |= MAP_PRIVATE;
+
+  /* The deflate stream is uncompressed up to the required offset + length, if
+   * it hasn't previously been uncompressed */
+  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
+  if (missing > 0) {
+    uInt avail_out = zStream.avail_out;
+    zStream.avail_out = missing;
+    /* next_out still pointing at the start of the buffer means no inflate
+     * call has happened yet: initialize the raw deflate stream first. */
+    if ((*buffer == zStream.next_out) &&
+        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
+      log("inflateInit failed: %s", zStream.msg);
+      return MAP_FAILED;
+    }
+    int ret = inflate(&zStream, Z_SYNC_FLUSH);
+    if (ret < 0) {
+      log("inflate failed: %s", zStream.msg);
+      return MAP_FAILED;
+    }
+    if (ret == Z_NEED_DICT) {
+      log("zstream requires a dictionary. %s", zStream.msg);
+      return MAP_FAILED;
+    }
+    /* Restore avail_out to account for the bytes just produced. */
+    zStream.avail_out = avail_out - missing + zStream.avail_out;
+    if (ret == Z_STREAM_END) {
+      if (inflateEnd(&zStream) != Z_OK) {
+        log("inflateEnd failed: %s", zStream.msg);
+        return MAP_FAILED;
+      }
+      if (zStream.total_out != buffer->GetLength()) {
+        log("File not fully uncompressed! %ld / %d", zStream.total_out,
+            static_cast<unsigned int>(buffer->GetLength()));
+        return MAP_FAILED;
+      }
+    }
+  }
+#if defined(ANDROID) && defined(__arm__)
+  if (prot & PROT_EXEC) {
+    /* We just extracted data that may be executed in the future.
+     * We thus need to ensure Instruction and Data cache coherency. */
+    debug("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
+    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
+               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
+  }
+#endif
+
+  return buffer->mmap(addr, length, prot, flags, offset);
+}
+
+/* All mappings are done: drop the decompression buffer and the reference
+ * keeping the Zip archive alive (both are RAII members). */
+void
+MappableDeflate::finalize()
+{
+  /* Free decompression buffer */
+  buffer = NULL;
+  /* Remove reference to Zip archive */
+  zip = NULL;
+}
+
+/* Create a MappableSeekableZStream for on-demand (fault-driven) chunk
+ * decompression. Returns NULL on any initialization failure; the
+ * AutoDeletePtr cleans up the partially-built instance. */
+Mappable *
+MappableSeekableZStream::Create(const char *name, Zip *zip,
+                                Zip::Stream *stream)
+{
+  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
+  AutoDeletePtr<MappableSeekableZStream> mappable =
+    new MappableSeekableZStream(zip);
+
+  /* NOTE(review): if pthread_mutex_init fails here, the destructor invoked
+   * by AutoDeletePtr still calls pthread_mutex_destroy on the
+   * never-initialized mutex — confirm this is benign on target libc. */
+  if (pthread_mutex_init(&mappable->mutex, NULL))
+    return NULL;
+
+  if (!mappable->zStream.Init(stream->GetBuffer()))
+    return NULL;
+
+  mappable->buffer = _MappableBuffer::Create(name,
+                       mappable->zStream.GetUncompressedSize());
+  if (!mappable->buffer)
+    return NULL;
+
+  /* One availability counter per chunk, initially "not decompressed". */
+  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
+  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());
+
+  return mappable.forget();
+}
+
+/* Constructor only records the Zip reference; the rest of the state is
+ * filled in by Create(). */
+MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
+: zip(zip), chunkAvailNum(0) { }
+
+MappableSeekableZStream::~MappableSeekableZStream()
+{
+  pthread_mutex_destroy(&mutex);
+}
+
+/* Map a PROT_NONE window over the decompression buffer and record the
+ * requested protection in lazyMaps; the first access faults into ensure(),
+ * which decompresses the touched chunk and applies the real protection. */
+void *
+MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
+                              int flags, off_t offset)
+{
+  /* Map with PROT_NONE so that accessing the mapping would segfault, and
+   * bring us to ensure() */
+  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
+  if (res == MAP_FAILED)
+    return MAP_FAILED;
+
+  /* Store the mapping, ordered by offset and length */
+  std::vector<LazyMap>::reverse_iterator it;
+  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
+    if ((it->offset < offset) ||
+        ((it->offset == offset) && (it->length < length)))
+      break;
+  }
+  LazyMap map = { res, length, prot, offset };
+  lazyMaps.insert(it.base(), map);
+  return res;
+}
+
+/* Remove the lazy mapping matching the given address and length, then
+ * actually unmap it. addr/length must correspond to a mapping previously
+ * returned by MappableSeekableZStream::mmap. */
+void
+MappableSeekableZStream::munmap(void *addr, size_t length)
+{
+  std::vector<LazyMap>::iterator it;
+  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
+    /* Bug fix: the original used (it->addr = addr) — an assignment, not a
+     * comparison — which overwrote every visited entry's address and made
+     * the match depend on length alone. */
+    if ((it->addr == addr) && (it->length == length)) {
+      lazyMaps.erase(it);
+      ::munmap(addr, length);
+      return;
+    }
+  MOZ_NOT_REACHED("munmap called with unknown mapping");
+}
+
+/* Intentionally a no-op: the decompression buffer and zStream must stay
+ * alive because ensure() keeps decompressing chunks on demand after the
+ * initial mmaps are done. */
+void
+MappableSeekableZStream::finalize() { }
+
+/* RAII scoped lock over a pthread mutex: locks in the constructor, unlocks
+ * in the destructor; aborts (via MOZ_NOT_REACHED) if either call fails. */
+class AutoLock {
+public:
+  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
+  {
+    if (pthread_mutex_lock(mutex))
+      MOZ_NOT_REACHED("pthread_mutex_lock failed");
+  }
+  ~AutoLock()
+  {
+    if (pthread_mutex_unlock(mutex))
+      MOZ_NOT_REACHED("pthread_mutex_unlock failed");
+  }
+private:
+  pthread_mutex_t *mutex;
+};
+
+/* Fault handler workhorse: given a faulting address inside one of our
+ * PROT_NONE lazy mappings, decompress the chunk covering that page into the
+ * shared buffer and mprotect the corresponding range with the protection
+ * recorded at mmap time. Returns false when the address belongs to no known
+ * mapping or when decompression/mprotect fails. */
+bool
+MappableSeekableZStream::ensure(const void *addr)
+{
+  debug("ensure @%p", addr);
+  void *addrPage = reinterpret_cast<void *>
+                   (reinterpret_cast<uintptr_t>(addr) & PAGE_MASK);
+  /* Find the mapping corresponding to the given page */
+  std::vector<LazyMap>::iterator map;
+  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
+    if (map->Contains(addrPage))
+      break;
+  }
+  if (map == lazyMaps.end())
+    return false;
+
+  /* Find corresponding chunk */
+  off_t mapOffset = map->offsetOf(addrPage);
+  size_t chunk = mapOffset / zStream.GetChunkSize();
+
+  /* In the typical case, we just need to decompress the chunk entirely. But
+   * when the current mapping ends in the middle of the chunk, we want to
+   * stop there. However, if another mapping needs the last part of the
+   * chunk, we still need to continue. As mappings are ordered by offset
+   * and length, we don't need to scan the entire list of mappings.
+   * It is safe to run through lazyMaps here because the linker is never
+   * going to call mmap (which adds lazyMaps) while this function is
+   * called. */
+  size_t length = zStream.GetChunkSize(chunk);
+  size_t chunkStart = chunk * zStream.GetChunkSize();
+  size_t chunkEnd = chunkStart + length;
+  std::vector<LazyMap>::iterator it;
+  for (it = map; it < lazyMaps.end(); ++it) {
+    if (chunkEnd <= it->endOffset())
+      break;
+  }
+  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
+    /* The mapping "it" points at now is past the interesting one */
+    --it;
+    length = it->endOffset() - chunkStart;
+  }
+
+  /* Serialize decompression and the chunkAvail bookkeeping. */
+  AutoLock lock(&mutex);
+
+  /* The very first page is mapped and accessed separately of the rest, and
+   * as such, only the first page of the first chunk is decompressed this way.
+   * When we fault in the remaining pages of that chunk, we want to decompress
+   * the complete chunk again. Short of doing that, we would end up with
+   * no data between PAGE_SIZE and chunkSize, which would effectively corrupt
+   * symbol resolution in the underlying library. */
+  if (chunkAvail[chunk] < (length + PAGE_SIZE - 1) / PAGE_SIZE) {
+    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
+      return false;
+
+#if defined(ANDROID) && defined(__arm__)
+    if (map->prot & PROT_EXEC) {
+      /* We just extracted data that may be executed in the future.
+       * We thus need to ensure Instruction and Data cache coherency. */
+      debug("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
+      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
+                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
+    }
+#endif
+    /* Only count if we haven't already decompressed parts of the chunk */
+    if (chunkAvail[chunk] == 0)
+      chunkAvailNum++;
+
+    chunkAvail[chunk] = (length + PAGE_SIZE - 1) / PAGE_SIZE;
+  }
+
+  /* Flip the chunk mapping protection to the recorded flags. We could
+   * also flip the protection for other mappings of the same chunk,
+   * but it's easier to skip that and let further segfaults call
+   * ensure again. */
+  const void *chunkAddr = reinterpret_cast<const void *>
+                          (reinterpret_cast<uintptr_t>(addrPage)
+                           - mapOffset % zStream.GetChunkSize());
+  const void *chunkEndAddr = reinterpret_cast<const void *>
+                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);
+
+  /* Clamp the mprotect range to the intersection of the chunk span and
+   * the faulting mapping. */
+  const void *start = std::max(map->addr, chunkAddr);
+  const void *end = std::min(map->end(), chunkEndAddr);
+  length = reinterpret_cast<uintptr_t>(end)
+           - reinterpret_cast<uintptr_t>(start);
+
+  debug("mprotect @%p, 0x%x, 0x%x", start, length, map->prot);
+  if (mprotect(const_cast<void *>(start), length, map->prot) == 0)
+    return true;
+
+  log("mprotect failed");
+  return false;
+}
+
+/* Log a textual map of chunk availability ("[**__*]" rows of up to 64
+ * chunks, '*' = decompressed, '_' = not yet) plus a decompressed/total
+ * summary. Debug-build diagnostic only (goes through debug()).
+ * NOTE(review): %ld with size_t arguments assumes long == size_t — true on
+ * the 32-bit Android targets this was written for; verify elsewhere. */
+void
+MappableSeekableZStream::stats(const char *when, const char *name) const
+{
+  size_t nEntries = zStream.GetChunksNum();
+  debug("%s: %s; %ld/%ld chunks decompressed",
+        name, when, chunkAvailNum, nEntries);
+
+  size_t len = 64;
+  AutoDeleteArray<char> map = new char[len + 3];
+  map[0] = '[';
+
+  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
+    map[j] = chunkAvail[i] ? '*' : '_';
+    if ((j == len) || (i == nEntries - 1)) {
+      /* Row full (or last chunk): terminate and emit; map[0] ('[') is
+       * reused for every subsequent row since j restarts at 1. */
+      map[j + 1] = ']';
+      map[j + 2] = '\0';
+      debug("%s", static_cast<char *>(map));
+      j = 0;
+    }
+  }
+}
diff --git a/sal/android/faulty.lib/linker/Mappable.h b/sal/android/faulty.lib/linker/Mappable.h
new file mode 100644
index 000000000000..98fe2c24909b
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Mappable.h
@@ -0,0 +1,248 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Mappable_h
+#define Mappable_h
+
+#include <sys/types.h>
+#include <pthread.h>
+#include "Zip.h"
+#include "SeekableZStream.h"
+#include "mozilla/RefPtr.h"
+#include "zlib.h"
+
+/**
+ * Abstract class to handle mmap()ing from various kind of entities, such as
+ * plain files or Zip entries. The virtual members are meant to act as the
+ * equivalent system functions, with a few differences:
+ * - mapped memory is always MAP_PRIVATE, even though a given implementation
+ * may use something different internally.
+ * - memory after length and up to the end of the corresponding page is nulled
+ * out.
+ */
+/* Abstract base: see the header comment above for the mmap contract
+ * (always-private mappings, zeroed page tails). */
+class Mappable
+{
+public:
+  virtual ~Mappable() { }
+
+  virtual void *mmap(const void *addr, size_t length, int prot, int flags,
+                     off_t offset) = 0;
+
+private:
+  virtual void munmap(void *addr, size_t length) {
+    ::munmap(addr, length);
+  }
+  /* Limit use of Mappable::munmap to classes that keep track of the address
+   * and size of the mapping. This allows to ignore ::munmap return value. */
+  friend class Mappable1stPagePtr;
+
+public:
+  /**
+   * Ensures the availability of the memory pages for the page(s) containing
+   * the given address. Returns whether the pages were successfully made
+   * available.
+   */
+  virtual bool ensure(const void *addr) { return true; }
+
+  /**
+   * Indicate to a Mappable instance that no further mmap is going to happen.
+   */
+  virtual void finalize() = 0;
+
+  /**
+   * Shows some stats about the Mappable instance.
+   * Meant for MappableSeekableZStream only.
+   * As Mappables don't keep track of what they are instanciated for, the name
+   * argument is used to make the stats logging useful to the reader. The when
+   * argument is to be used by the caller to give an identifier of the when
+   * the stats call is made.
+   */
+  virtual void stats(const char *when, const char *name) const { }
+};
+
+/**
+ * Mappable implementation for plain files
+ */
+class MappableFile: public Mappable
+{
+public:
+  ~MappableFile() { }
+
+  /**
+   * Create a MappableFile instance for the given file path.
+   * Returns NULL when the file cannot be opened.
+   */
+  static Mappable *Create(const char *path);
+
+  /* Inherited from Mappable */
+  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+  virtual void finalize();
+
+protected:
+  /* Takes ownership of the given file descriptor. */
+  MappableFile(int fd): fd(fd) { }
+
+private:
+  /* File descriptor */
+  AutoCloseFD fd;
+};
+
+/**
+ * Mappable implementation for deflated stream in a Zip archive
+ * Inflates the complete stream into a cache file.
+ */
+class MappableExtractFile: public MappableFile
+{
+public:
+  ~MappableExtractFile();
+
+  /**
+   * Create a MappableExtractFile instance for the given Zip stream. The name
+   * argument is used to create the cache file in the cache directory.
+   */
+  static Mappable *Create(const char *name, Zip *zip, Zip::Stream *stream);
+
+private:
+  /* Takes ownership of both the descriptor and the heap-allocated path. */
+  MappableExtractFile(int fd, char *path)
+  : MappableFile(fd), path(path), pid(getpid()) { }
+
+  /**
+   * AutoUnlinkFile keeps track of a file name and removes (unlinks) the file
+   * when the instance is destroyed.
+   */
+  struct AutoUnlinkFileTraits: public AutoDeleteArrayTraits<char>
+  {
+    static void clean(char *value)
+    {
+      unlink(value);
+      AutoDeleteArrayTraits<char>::clean(value);
+    }
+  };
+  typedef AutoClean<AutoUnlinkFileTraits> AutoUnlinkFile;
+
+  /* Extracted file */
+  AutoUnlinkFile path;
+
+  /* Id of the process that initialized the instance */
+  pid_t pid;
+};
+
+class _MappableBuffer;
+
+/**
+ * Mappable implementation for deflated stream in a Zip archive.
+ * Inflates the mapped bits in a temporary buffer.
+ */
+class MappableDeflate: public Mappable
+{
+public:
+  ~MappableDeflate();
+
+  /**
+   * Create a MappableDeflate instance for the given Zip stream. The name
+   * argument is used for an appropriately named temporary file, and the Zip
+   * instance is given for the MappableDeflate to keep a reference of it.
+   */
+  static Mappable *Create(const char *name, Zip *zip, Zip::Stream *stream);
+
+  /* Inherited from Mappable */
+  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+  virtual void finalize();
+
+private:
+  MappableDeflate(_MappableBuffer *buf, Zip *zip, Zip::Stream *stream);
+
+  /* Zip reference */
+  mozilla::RefPtr<Zip> zip;
+
+  /* Decompression buffer */
+  AutoDeletePtr<_MappableBuffer> buffer;
+
+  /* Zlib data; persists across mmap calls so inflation resumes where the
+   * previous call stopped. */
+  z_stream zStream;
+};
+
+/**
+ * Mappable implementation for seekable zStreams.
+ * Inflates the mapped bits in a temporary buffer, on demand.
+ */
+class MappableSeekableZStream: public Mappable
+{
+public:
+  ~MappableSeekableZStream();
+
+  /**
+   * Create a MappableSeekableZStream instance for the given Zip stream. The
+   * name argument is used for an appropriately named temporary file, and the
+   * Zip instance is given for the MappableSeekableZStream to keep a reference
+   * of it.
+   */
+  static Mappable *Create(const char *name, Zip *zip,
+                          Zip::Stream *stream);
+
+  /* Inherited from Mappable */
+  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+  virtual void munmap(void *addr, size_t length);
+  virtual void finalize();
+  virtual bool ensure(const void *addr);
+  virtual void stats(const char *when, const char *name) const;
+
+private:
+  MappableSeekableZStream(Zip *zip);
+
+  /* Zip reference */
+  mozilla::RefPtr<Zip> zip;
+
+  /* Decompression buffer */
+  AutoDeletePtr<_MappableBuffer> buffer;
+
+  /* Seekable ZStream */
+  SeekableZStream zStream;
+
+  /* Keep track of mappings performed with MappableSeekableZStream::mmap so
+   * that they can be realized by MappableSeekableZStream::ensure.
+   * Values stored in the struct are those passed to mmap */
+  struct LazyMap
+  {
+    const void *addr;
+    size_t length;
+    int prot;
+    off_t offset;
+
+    /* Returns addr + length, as a pointer */
+    const void *end() const {
+      return reinterpret_cast<const void *>
+             (reinterpret_cast<const unsigned char *>(addr) + length);
+    }
+
+    /* Returns offset + length */
+    const off_t endOffset() const {
+      return offset + length;
+    }
+
+    /* Returns the offset corresponding to the given address */
+    const off_t offsetOf(const void *ptr) const {
+      return reinterpret_cast<uintptr_t>(ptr)
+             - reinterpret_cast<uintptr_t>(addr) + offset;
+    }
+
+    /* Returns whether the given address is in the LazyMap range */
+    const bool Contains(const void *ptr) const {
+      return (ptr >= addr) && (ptr < end());
+    }
+  };
+
+  /* List of all mappings, kept ordered by (offset, length) by mmap(). */
+  std::vector<LazyMap> lazyMaps;
+
+  /* Array keeping track of which chunks have already been decompressed.
+   * Each value is the number of pages decompressed for the given chunk. */
+  AutoDeleteArray<unsigned char> chunkAvail;
+
+  /* Number of chunks that have already been decompressed. */
+  size_t chunkAvailNum;
+
+  /* Mutex protecting decompression */
+  pthread_mutex_t mutex;
+};
+
+#endif /* Mappable_h */
diff --git a/sal/android/faulty.lib/linker/SeekableZStream.cxx b/sal/android/faulty.lib/linker/SeekableZStream.cxx
new file mode 100644
index 000000000000..0d399cbaa1c7
--- /dev/null
+++ b/sal/android/faulty.lib/linker/SeekableZStream.cxx
@@ -0,0 +1,100 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <algorithm>
+#include "SeekableZStream.h"
+#include "Logging.h"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (~ (PAGE_SIZE - 1))
+#endif
+
+/* Parse and validate a seekable-zstream header from the given buffer, and
+ * record the stream geometry (total/chunk/last-chunk sizes and the offset
+ * table that immediately follows the header). Returns false when the magic
+ * or the geometry is invalid. */
+bool
+SeekableZStream::Init(const void *buf)
+{
+  const SeekableZStreamHeader *header = SeekableZStreamHeader::validate(buf);
+  if (!header) {
+    log("Not a seekable zstream");
+    return false;
+  }
+
+  buffer = reinterpret_cast<const unsigned char *>(buf);
+  totalSize = header->totalSize;
+  chunkSize = header->chunkSize;
+  lastChunkSize = header->lastChunkSize;
+  /* The offset table starts right after the fixed-size header. */
+  offsetTable.Init(&header[1], header->nChunks);
+
+  /* Sanity check */
+  if ((chunkSize == 0) ||
+      (chunkSize % PAGE_SIZE) ||
+      (chunkSize > 8 * PAGE_SIZE) ||
+      (offsetTable.numElements() < 1) ||
+      (lastChunkSize == 0) ||
+      (lastChunkSize > chunkSize)) {
+    log("Malformed or broken seekable zstream");
+    return false;
+  }
+
+  return true;
+}
+
+/* Decompress length bytes starting at the given chunk into "where",
+ * one chunk at a time; fails fast as soon as any chunk fails. */
+bool
+SeekableZStream::Decompress(void *where, size_t chunk, size_t length)
+{
+  unsigned char *out = reinterpret_cast<unsigned char *>(where);
+  size_t remaining = length;
+  for (size_t n = chunk; remaining; n++) {
+    size_t len = std::min(remaining, static_cast<size_t>(chunkSize));
+    if (!DecompressChunk(out, n, len))
+      return false;
+    out += len;
+    remaining -= len;
+  }
+  return true;
+}
+
+/* Decompress the given chunk (0-based) into "where". "length" bytes are
+ * produced; 0 or an oversized length means the whole chunk. Returns false
+ * on an out-of-range chunk or any zlib error. */
+bool
+SeekableZStream::DecompressChunk(void *where, size_t chunk, size_t length)
+{
+  if (chunk >= offsetTable.numElements()) {
+    log("DecompressChunk: chunk #%ld out of range [0-%ld)",
+        chunk, offsetTable.numElements());
+    return false;
+  }
+
+  bool isLastChunk = (chunk == offsetTable.numElements() - 1);
+
+  size_t chunkLen = isLastChunk ? lastChunkSize : chunkSize;
+
+  if (length == 0 || length > chunkLen)
+    length = chunkLen;
+
+  debug("DecompressChunk #%ld @%p (%ld/%ld)", chunk, where, length, chunkLen);
+  z_stream zStream;
+  memset(&zStream, 0, sizeof(zStream));
+  /* The compressed size of a chunk is the distance to the next chunk's
+   * offset (or to the end of the stream for the last chunk). */
+  zStream.avail_in = (isLastChunk ? totalSize : uint32_t(offsetTable[chunk + 1]))
+                     - uint32_t(offsetTable[chunk]);
+  zStream.next_in = const_cast<Bytef *>(buffer + uint32_t(offsetTable[chunk]));
+  zStream.avail_out = length;
+  zStream.next_out = reinterpret_cast<Bytef *>(where);
+
+  /* Decompress chunk */
+  if (inflateInit(&zStream) != Z_OK) {
+    log("inflateInit failed: %s", zStream.msg);
+    return false;
+  }
+  /* Full chunk: Z_FINISH must yield Z_STREAM_END; partial chunk:
+   * Z_SYNC_FLUSH must yield Z_OK. Bug fix: the expected value must be
+   * parenthesized — "!=" binds tighter than "?:", so the original
+   * condition parsed as (inflate(...) != (length == chunkLen))
+   * ? Z_STREAM_END : Z_OK, never comparing against the intended value. */
+  if (inflate(&zStream, (length == chunkLen) ? Z_FINISH : Z_SYNC_FLUSH)
+      != ((length == chunkLen) ? Z_STREAM_END : Z_OK)) {
+    log("inflate failed: %s", zStream.msg);
+    /* Release zlib state on the failure path too (was leaked before). */
+    inflateEnd(&zStream);
+    return false;
+  }
+  if (inflateEnd(&zStream) != Z_OK) {
+    log("inflateEnd failed: %s", zStream.msg);
+    return false;
+  }
+  return true;
+}
diff --git a/sal/android/faulty.lib/linker/SeekableZStream.h b/sal/android/faulty.lib/linker/SeekableZStream.h
new file mode 100644
index 000000000000..44243f054d83
--- /dev/null
+++ b/sal/android/faulty.lib/linker/SeekableZStream.h
@@ -0,0 +1,102 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef SeekableZStream_h
+#define SeekableZStream_h
+
+#include "Zip.h"
+
+/**
+ * Seekable compressed stream are created by splitting the original
+ * decompressed data in small chunks and compress these chunks
+ * individually.
+ *
+ * The seekable compressed file format consists in a header defined below,
+ * followed by a table of 32-bits words containing the offsets for each
+ * individual compressed chunk, then followed by the compressed chunks.
+ */
+
+#pragma pack(1)
+/* On-disk header of a seekable zstream; packed so the layout is exactly
+ * the five little-endian 32-bit words stored in the file. */
+struct SeekableZStreamHeader: public Zip::SignedEntity<SeekableZStreamHeader>
+{
+  SeekableZStreamHeader()
+  : Zip::SignedEntity<SeekableZStreamHeader>(magic)
+  , totalSize(0), chunkSize(0), nChunks(0), lastChunkSize(0) { }
+
+  /* Reuse Zip::SignedEntity to handle the magic number used in the Seekable
+   * ZStream file format. The magic number is "SeZz". */
+  static const uint32_t magic = 0x7a5a6553;
+
+  /* Total size of the stream, including the 4 magic bytes. */
+  le_uint32 totalSize;
+
+  /* Chunk size */
+  le_uint32 chunkSize;
+
+  /* Number of chunks */
+  le_uint32 nChunks;
+
+  /* Size of last chunk (> 0, <= Chunk size) */
+  le_uint32 lastChunkSize;
+};
+#pragma pack()
+
+MOZ_STATIC_ASSERT(sizeof(SeekableZStreamHeader) == 5 * 4,
+ "SeekableZStreamHeader should be 5 32-bits words");
+
+/**
+ * Helper class used to decompress Seekable ZStreams.
+ */
+class SeekableZStream {
+public:
+  /* Initialize from the given buffer. Returns whether initialization
+   * succeeded (true) or failed (false). */
+  bool Init(const void *buf);
+
+  /* Decompresses starting from the given chunk. The decompressed data is
+   * stored at the given location. The given length, in bytes, indicates
+   * how much data to decompress. If length is 0, then exactly one chunk
+   * is decompressed.
+   * Returns whether decompression succeeded (true) or failed (false). */
+  bool Decompress(void *where, size_t chunk, size_t length = 0);
+
+  /* Decompresses the given chunk at the given address. If a length is given,
+   * only decompresses that amount of data instead of the entire chunk.
+   * Returns whether decompression succeeded (true) or failed (false). */
+  bool DecompressChunk(void *where, size_t chunk, size_t length = 0);
+
+  /* Returns the uncompressed size of the complete zstream.
+   * (By-value returns are not const-qualified: a top-level const there has
+   * no effect and only triggers -Wignored-qualifiers.) */
+  size_t GetUncompressedSize() const
+  {
+    return (offsetTable.numElements() - 1) * chunkSize + lastChunkSize;
+  }
+
+  /* Returns the uncompressed size of the given chunk; only the last chunk
+   * may be shorter than chunkSize. */
+  size_t GetChunkSize(size_t chunk = 0) const {
+    return (chunk == offsetTable.numElements() - 1) ? lastChunkSize : chunkSize;
+  }
+
+  /* Returns the number of chunks */
+  size_t GetChunksNum() const {
+    return offsetTable.numElements();
+  }
+
+private:
+  /* Raw seekable zstream buffer */
+  const unsigned char *buffer;
+
+  /* Total size of the stream, including the 4 magic bytes. */
+  uint32_t totalSize;
+
+  /* Chunk size */
+  uint32_t chunkSize;
+
+  /* Size of last chunk (> 0, <= Chunk size) */
+  uint32_t lastChunkSize;
+
+  /* Offsets table */
+  Array<le_uint32> offsetTable;
+};
+
+#endif /* SeekableZStream_h */
diff --git a/sal/android/faulty.lib/linker/Utils.h b/sal/android/faulty.lib/linker/Utils.h
new file mode 100644
index 000000000000..6e620b7dafcd
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Utils.h
@@ -0,0 +1,566 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Utils_h
+#define Utils_h
+
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include "mozilla/Assertions.h"
+
+/**
+ * On architectures that are little endian and that support unaligned reads,
+ * we can use direct type, but on others, we want to have a special class
+ * to handle conversion and alignment issues.
+ */
+#if !defined(DEBUG) && (defined(__i386__) || defined(__x86_64__))
+typedef uint16_t le_uint16;
+typedef uint32_t le_uint32;
+#else
+
+/**
+ * Template that allows to find an unsigned int type from a (computed) bit
+ * size. Only 16 and 32 bits are specialized; any other size is a compile
+ * error, which is intentional.
+ */
+template <int s> struct UInt { };
+template <> struct UInt<16> { typedef uint16_t Type; };
+template <> struct UInt<32> { typedef uint32_t Type; };
+
+/**
+ * Template to access 2 n-bit sized words as a 2*n-bit sized word, doing
+ * conversion from little endian and avoiding alignment issues.
+ * T is the half-word type (unsigned char for le_uint16, le_uint16 for
+ * le_uint32); the full word type is derived through UInt.
+ */
+template <typename T>
+class le_to_cpu
+{
+public:
+  typedef typename UInt<16 * sizeof(T)>::Type Type;
+
+  /* Read: reassemble the low (a) and high (b) halves into a native word. */
+  operator Type() const
+  {
+    return (b << (sizeof(T) * 8)) | a;
+  }
+
+  /* Write: split a native word into its two little-endian halves. */
+  const le_to_cpu& operator =(const Type &v)
+  {
+    a = v & ((1 << (sizeof(T) * 8)) - 1);
+    b = v >> (sizeof(T) * 8);
+    return *this;
+  }
+
+  le_to_cpu() { }
+  le_to_cpu(const Type &v)
+  {
+    operator =(v);
+  }
+
+  const le_to_cpu& operator +=(const Type &v)
+  {
+    return operator =(operator Type() + v);
+  }
+
+  /* Post-increment; note it returns *this (the new value), not a copy of
+   * the old value as a canonical post-increment would. */
+  const le_to_cpu& operator ++(int)
+  {
+    return operator =(operator Type() + 1);
+  }
+
+private:
+  /* Halves stored in little-endian order: a is low, b is high. */
+  T a, b;
+};
+
+/**
+ * Type definitions: a little-endian 16-bit word is two bytes; a
+ * little-endian 32-bit word is two little-endian 16-bit halves.
+ */
+typedef le_to_cpu<unsigned char> le_uint16;
+typedef le_to_cpu<le_uint16> le_uint32;
+#endif
+
+/**
+ * AutoClean is a helper to create RAII wrappers
+ * The Traits class is expected to look like the following:
+ * struct Traits {
+ * // Define the type of the value stored in the wrapper
+ * typedef value_type type;
+ * // Returns the value corresponding to the uninitialized or freed state
+ * const static type None();
+ * // Cleans up resources corresponding to the wrapped value
+ * const static void clean(type);
+ * }
+ */
+template <typename Traits>
+class AutoClean
+{
+  typedef typename Traits::type T;
+public:
+  AutoClean(): value(Traits::None()) { }
+  AutoClean(const T& value): value(value) { }
+  /* Clean up the wrapped value unless it is (or was reset to) None. */
+  ~AutoClean()
+  {
+    if (value != Traits::None())
+      Traits::clean(value);
+  }
+
+  operator const T&() const { return value; }
+  const T& operator->() const { return value; }
+  const T& get() const { return value; }
+
+  /* Relinquish ownership: returns the wrapped value and resets the wrapper
+   * to None so the destructor won't clean it. */
+  T forget()
+  {
+    T _value = value;
+    value = Traits::None();
+    return _value;
+  }
+
+  bool operator ==(T other) const
+  {
+    return value == other;
+  }
+
+  /* Take ownership of a new value, cleaning the previous one first.
+   * NOTE(review): assigning the value currently held would clean it and
+   * then keep the now-stale value — confirm callers never do that. */
+  AutoClean& operator =(T other)
+  {
+    if (value != Traits::None())
+      Traits::clean(value);
+    value = other;
+    return *this;
+  }
+
+private:
+  T value;
+};
+
+/**
+ * AUTOCLEAN_TEMPLATE defines a templated class derived from AutoClean.
+ * This allows to implement templates such as AutoFreePtr. The generated
+ * class inherits AutoClean's assignment operator and forwards its
+ * constructors; comments must stay out of the macro body itself, as they
+ * would break the backslash line continuations.
+ */
+#define AUTOCLEAN_TEMPLATE(name, Traits) \
+template <typename T> \
+struct name: public AutoClean<Traits<T> > \
+{ \
+  using AutoClean<Traits<T> >::operator =; \
+  name(): AutoClean<Traits<T> >() { } \
+  name(typename Traits<T>::type ptr): AutoClean<Traits<T> >(ptr) { } \
+}
+
+/**
+ * AutoCloseFD is a RAII wrapper for POSIX file descriptors: the wrapped
+ * descriptor is close()d on destruction unless it is -1 (the None value).
+ */
+struct AutoCloseFDTraits
+{
+  typedef int type;
+  static int None() { return -1; }
+  static void clean(int fd) { close(fd); }
+};
+typedef AutoClean<AutoCloseFDTraits> AutoCloseFD;
+
+/**
+ * AutoFreePtr is a RAII wrapper for pointers that need to be free()d.
+ *
+ * struct S { ... };
+ * AutoFreePtr<S> foo = malloc(sizeof(S));
+ * AutoFreePtr<char> bar = strdup(str);
+ */
+template <typename T>
+struct AutoFreePtrTraits
+{
+  typedef T *type;
+  static T *None() { return NULL; }
+  /* NOTE(review): free() lives in <cstdlib>, which this header does not
+   * include directly — presumably pulled in transitively; confirm. */
+  static void clean(T *ptr) { free(ptr); }
+};
+AUTOCLEAN_TEMPLATE(AutoFreePtr, AutoFreePtrTraits);
+
+/**
+ * AutoDeletePtr is a RAII wrapper for pointers that need to be deleted.
+ *
+ * struct S { ... };
+ * AutoDeletePtr<S> foo = new S();
+ */
+template <typename T>
+struct AutoDeletePtrTraits: public AutoFreePtrTraits<T>
+{
+  /* Inherits type/None() from AutoFreePtrTraits; cleanup uses delete. */
+  static void clean(T *ptr) { delete ptr; }
+};
+AUTOCLEAN_TEMPLATE(AutoDeletePtr, AutoDeletePtrTraits);
+
+/**
+ * AutoDeleteArray is a RAII wrapper for pointers that need to be delete[]ed.
+ *
+ * struct S { ... };
+ * AutoDeleteArray<S> foo = new S[42];
+ */
+template <typename T>
+struct AutoDeleteArrayTraits: public AutoFreePtrTraits<T>
+{
+  /* Inherits type/None() from AutoFreePtrTraits; cleanup uses delete[]. */
+  static void clean(T *ptr) { delete [] ptr; }
+};
+AUTOCLEAN_TEMPLATE(AutoDeleteArray, AutoDeleteArrayTraits);
+
+/**
+ * MappedPtr is a RAII wrapper for mmap()ed memory. It can be used as
+ * a simple void * or unsigned char *.
+ *
+ * It is defined as a derivative of a template that allows to use a
+ * different unmapping strategy.
+ */
+template <typename T>
+class GenericMappedPtr
+{
+  /* T is the derived class (CRTP); it provides the munmap() used to
+   * release the mapping. MAP_FAILED is the "empty" sentinel, matching
+   * what mmap() returns on failure. */
+public:
+  GenericMappedPtr(void *buf, size_t length): buf(buf), length(length) { }
+  GenericMappedPtr(): buf(MAP_FAILED), length(0) { }
+
+  /* Take ownership of a new mapping, releasing the previous one if any. */
+  void Assign(void *b, size_t len) {
+    if (buf != MAP_FAILED)
+      static_cast<T *>(this)->munmap(buf, length);
+    buf = b;
+    length = len;
+  }
+
+  ~GenericMappedPtr()
+  {
+    if (buf != MAP_FAILED)
+      static_cast<T *>(this)->munmap(buf, length);
+  }
+
+  operator void *() const
+  {
+    return buf;
+  }
+
+  operator unsigned char *() const
+  {
+    return reinterpret_cast<unsigned char *>(buf);
+  }
+
+  bool operator ==(void *ptr) const {
+    return buf == ptr;
+  }
+
+  bool operator ==(unsigned char *ptr) const {
+    return buf == ptr;
+  }
+
+  /* Byte-wise pointer arithmetic on the mapped base address. */
+  void *operator +(off_t offset) const
+  {
+    return reinterpret_cast<char *>(buf) + offset;
+  }
+
+  /**
+   * Returns whether the given address is within the mapped range
+   */
+  bool Contains(void *ptr) const
+  {
+    return (ptr >= buf) && (ptr < reinterpret_cast<char *>(buf) + length);
+  }
+
+  /**
+   * Returns the length of the mapped range
+   */
+  size_t GetLength() const
+  {
+    return length;
+  }
+
+private:
+  void *buf;
+  size_t length;
+};
+
+/* Default concrete mapped pointer: unmapping is a plain ::munmap(). */
+struct MappedPtr: public GenericMappedPtr<MappedPtr>
+{
+  MappedPtr(void *buf, size_t length)
+  : GenericMappedPtr<MappedPtr>(buf, length) { }
+  MappedPtr(): GenericMappedPtr<MappedPtr>() { }
+
+private:
+  friend class GenericMappedPtr<MappedPtr>;
+  /* Called by the base class (CRTP) to release the mapping. */
+  void munmap(void *buf, size_t length)
+  {
+    ::munmap(buf, length);
+  }
+};
+
+/**
+ * UnsizedArray is a way to access raw arrays of data in memory.
+ *
+ * struct S { ... };
+ * UnsizedArray<S> a(buf);
+ * UnsizedArray<S> b; b.Init(buf);
+ *
+ * This is roughly equivalent to
+ * const S *a = reinterpret_cast<const S *>(buf);
+ * const S *b = NULL; b = reinterpret_cast<const S *>(buf);
+ *
+ * An UnsizedArray has no known length, and it's up to the caller to make
+ * sure the accessed memory is mapped and makes sense.
+ */
+template <typename T>
+class UnsizedArray
+{
+public:
+  typedef size_t idx_t;
+
+  /**
+   * Constructors and Initializers
+   */
+  UnsizedArray(): contents(NULL) { }
+  UnsizedArray(const void *buf): contents(reinterpret_cast<const T *>(buf)) { }
+
+  /* One-shot initialization; asserts if the array was already pointed at
+   * a buffer. */
+  void Init(const void *buf)
+  {
+    MOZ_ASSERT(contents == NULL);
+    contents = reinterpret_cast<const T *>(buf);
+  }
+
+  /**
+   * Returns the nth element of the array. No bounds checking is possible
+   * here: the array has no known length.
+   */
+  const T &operator[](const idx_t index) const
+  {
+    MOZ_ASSERT(contents);
+    return contents[index];
+  }
+
+  /**
+   * Returns whether the array points somewhere
+   */
+  operator bool() const
+  {
+    return contents != NULL;
+  }
+private:
+  const T *contents;
+};
+
+/**
+ * Array, like UnsizedArray, is a way to access raw arrays of data in memory.
+ * Unlike UnsizedArray, it has a known length, and is enumerable with an
+ * iterator.
+ *
+ * struct S { ... };
+ * Array<S> a(buf, len);
+ * UnsizedArray<S> b; b.Init(buf, len);
+ *
+ * In the above examples, len is the number of elements in the array. It is
+ * also possible to initialize an Array with the buffer size:
+ *
+ * Array<S> c; c.InitSize(buf, size);
+ *
+ * It is also possible to initialize an Array in two steps, only providing
+ * one data at a time:
+ *
+ * Array<S> d;
+ * d.Init(buf);
+ * d.Init(len); // or d.InitSize(size);
+ *
+ */
+template <typename T>
+class Array: public UnsizedArray<T>
+{
+public:
+  typedef typename UnsizedArray<T>::idx_t idx_t;
+
+  /**
+   * Constructors and Initializers
+   */
+  Array(): UnsizedArray<T>(), length(0) { }
+  Array(const void *buf, const idx_t length)
+  : UnsizedArray<T>(buf), length(length) { }
+
+  void Init(const void *buf)
+  {
+    UnsizedArray<T>::Init(buf);
+  }
+
+  /* Second step of two-step initialization: set the element count.
+   * Asserts if a length was already set. */
+  void Init(const idx_t len)
+  {
+    MOZ_ASSERT(length == 0);
+    length = len;
+  }
+
+  /* Like Init(len), but takes a byte size instead of an element count. */
+  void InitSize(const idx_t size)
+  {
+    Init(size / sizeof(T));
+  }
+
+  void Init(const void *buf, const idx_t len)
+  {
+    UnsizedArray<T>::Init(buf);
+    Init(len);
+  }
+
+  void InitSize(const void *buf, const idx_t size)
+  {
+    UnsizedArray<T>::Init(buf);
+    InitSize(size);
+  }
+
+  /**
+   * Returns the nth element of the array
+   */
+  const T &operator[](const idx_t index) const
+  {
+    MOZ_ASSERT(index < length);
+    MOZ_ASSERT(operator bool());
+    return UnsizedArray<T>::operator[](index);
+  }
+
+  /**
+   * Returns the number of elements in the array
+   */
+  idx_t numElements() const
+  {
+    return length;
+  }
+
+  /**
+   * Returns whether the array points somewhere and has at least one element.
+   */
+  operator bool() const
+  {
+    return (length > 0) && UnsizedArray<T>::operator bool();
+  }
+
+  /**
+   * Iterator for an Array. Use is similar to that of STL const_iterators:
+   *
+   * struct S { ... };
+   * Array<S> a(buf, len);
+   * for (Array<S>::iterator it = a.begin(); it < a.end(); ++it) {
+   *   // Do something with *it.
+   * }
+   */
+  class iterator
+  {
+  public:
+    iterator(): item(NULL) { }
+
+    const T &operator *() const
+    {
+      return *item;
+    }
+
+    const T *operator ->() const
+    {
+      return item;
+    }
+
+    iterator &operator ++()
+    {
+      ++item;
+      return *this;
+    }
+
+    bool operator<(const iterator &other) const
+    {
+      return item < other.item;
+    }
+  protected:
+    friend class Array<T>;
+    /* Stores the element's address; only Array can create a positioned
+     * iterator. */
+    iterator(const T &item): item(&item) { }
+
+    private:
+      const T *item;
+  };
+
+  /**
+   * Returns an iterator pointing at the beginning of the Array
+   */
+  iterator begin() const {
+    if (length)
+      return iterator(UnsizedArray<T>::operator[](0));
+    return iterator();
+  }
+
+  /**
+   * Returns an iterator pointing past the end of the Array.
+   * Note: this goes through UnsizedArray's operator[] (no bounds assert),
+   * forming a reference one past the last element; only its address is
+   * ever used.
+   */
+  iterator end() const {
+    if (length)
+      return iterator(UnsizedArray<T>::operator[](length));
+    return iterator();
+  }
+
+  /**
+   * Reverse iterator for an Array. Use is similar to that of STL
+   * const_reverse_iterators:
+   *
+   * struct S { ... };
+   * Array<S> a(buf, len);
+   * for (Array<S>::reverse_iterator it = a.rbegin(); it < a.rend(); ++it) {
+   *   // Do something with *it.
+   * }
+   */
+  class reverse_iterator
+  {
+  public:
+    reverse_iterator(): item(NULL) { }
+
+    /* item points one past the element it denotes, STL-style. */
+    const T &operator *() const
+    {
+      const T *tmp = item;
+      return *--tmp;
+    }
+
+    const T *operator ->() const
+    {
+      return &operator*();
+    }
+
+    reverse_iterator &operator ++()
+    {
+      --item;
+      return *this;
+    }
+
+    bool operator<(const reverse_iterator &other) const
+    {
+      return item > other.item;
+    }
+  protected:
+    friend class Array<T>;
+    reverse_iterator(const T &item): item(&item) { }
+
+  private:
+    const T *item;
+  };
+
+  /**
+   * Returns a reverse iterator pointing at the end of the Array
+   */
+  reverse_iterator rbegin() const {
+    if (length)
+      return reverse_iterator(UnsizedArray<T>::operator[](length));
+    return reverse_iterator();
+  }
+
+  /**
+   * Returns a reverse iterator pointing past the beginning of the Array
+   */
+  reverse_iterator rend() const {
+    if (length)
+      return reverse_iterator(UnsizedArray<T>::operator[](0));
+    return reverse_iterator();
+  }
+private:
+  idx_t length;
+};
+
+/**
+ * Transforms a pointer-to-function to a pointer-to-object pointing at the
+ * same address.
+ */
+template <typename T>
+void *FunctionPtr(T func)
+{
+  /* Type-pun through a union: a direct cast between function and object
+   * pointers is not portable C++. This assumes both pointer kinds share a
+   * representation, which holds on the POSIX/ELF targets this linker
+   * supports. */
+  union {
+    void *ptr;
+    T func;
+  } f;
+  f.func = func;
+  return f.ptr;
+}
+
+#endif /* Utils_h */
+
diff --git a/sal/android/faulty.lib/linker/Zip.cxx b/sal/android/faulty.lib/linker/Zip.cxx
new file mode 100644
index 000000000000..d08ca6ba747d
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Zip.cxx
@@ -0,0 +1,180 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <cstdlib>
+#include <algorithm>
+#include "Logging.h"
+#include "Zip.h"
+
+/* Open and map the given Zip archive. On any failure, the instance is
+ * still constructed but |mapped| stays MAP_FAILED, so later methods
+ * simply error out. */
+Zip::Zip(const char *filename, ZipCollection *collection)
+: name(strdup(filename))
+, mapped(MAP_FAILED)
+, nextDir(NULL)
+, entries(NULL)
+, parent(collection)
+{
+  /* Open and map the file in memory; the fd is closed automatically by
+   * AutoCloseFD when this constructor returns (the mapping survives). */
+  AutoCloseFD fd(open(name, O_RDONLY));
+  if (fd == -1) {
+    log("Error opening %s: %s", filename, strerror(errno));
+    return;
+  }
+  struct stat st;
+  if (fstat(fd, &st) == -1) {
+    log("Error stating %s: %s", filename, strerror(errno));
+    return;
+  }
+  size = st.st_size;
+  /* An archive must at least be able to hold the End of Central Directory
+   * record. */
+  if (size <= sizeof(CentralDirectoryEnd)) {
+    log("Error reading %s: too short", filename);
+    return;
+  }
+  mapped = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+  if (mapped == MAP_FAILED) {
+    log("Error mmapping %s: %s", filename, strerror(errno));
+    return;
+  }
+  debug("Mapped %s @%p", filename, mapped);
+
+  /* Store the first Local File entry */
+  nextFile = LocalFile::validate(mapped);
+}
+
+/* Unregister from the owning collection (if any), unmap the archive and
+ * release the strdup()ed name. */
+Zip::~Zip()
+{
+  if (parent)
+    parent->Forget(this);
+  if (mapped != MAP_FAILED) {
+    munmap(mapped, size);
+    debug("Unmapped %s @%p", name, mapped);
+  }
+  free(name);
+}
+
+/* Look up the archive entry with the given path and fill |out| with its
+ * stream information. Returns false when the entry cannot be found.
+ * Lookups are stateful (nextFile/nextDir are mutable) and optimized for
+ * sequential access to consecutive entries. */
+bool
+Zip::GetStream(const char *path, Zip::Stream *out) const
+{
+  debug("%s - GetFile %s", name, path);
+  /* Fast path: if the Local File header on store matches, we can return the
+   * corresponding stream right away.
+   * However, the Local File header may not contain enough information, in
+   * which case the 3rd bit on the generalFlag is set. Unfortunately, this
+   * bit is also set in some archives even when we do have the data (most
+   * notably the android packages as built by the Mozilla build system).
+   * So instead of testing the generalFlag bit, only use the fast path when
+   * we haven't read the central directory entries yet, and when the
+   * compressed size as defined in the header is not filled (which is a
+   * normal condition for the bit to be set). */
+  if (nextFile && nextFile->GetName().Equals(path) &&
+      !entries && (nextFile->compressedSize != 0)) {
+    debug("%s - %s was next file: fast path", name, path);
+    /* Fill Stream info from Local File header content */
+    const char *data = reinterpret_cast<const char *>(nextFile->GetData());
+    out->compressedBuf = data;
+    out->compressedSize = nextFile->compressedSize;
+    out->uncompressedSize = nextFile->uncompressedSize;
+    out->type = static_cast<Stream::Type>(uint16_t(nextFile->compression));
+
+    /* Find the next Local File header. It is usually simply following the
+     * compressed stream, but in cases where the 3rd bit of the generalFlag
+     * is set, there is a Data Descriptor header before. */
+    data += nextFile->compressedSize;
+    if ((nextFile->generalFlag & 0x8) && DataDescriptor::validate(data)) {
+      data += sizeof(DataDescriptor);
+    }
+    nextFile = LocalFile::validate(data);
+    return true;
+  }
+
+  /* If the directory entry we have in store doesn't match, scan the Central
+   * Directory for the entry corresponding to the given path */
+  if (!nextDir || !nextDir->GetName().Equals(path)) {
+    const DirectoryEntry *entry = GetFirstEntry();
+    debug("%s - Scan directory entries in search for %s", name, path);
+    while (entry && !entry->GetName().Equals(path)) {
+      entry = entry->GetNext();
+    }
+    nextDir = entry;
+  }
+  if (!nextDir) {
+    debug("%s - Couldn't find %s", name, path);
+    return false;
+  }
+
+  /* Find the Local File header corresponding to the Directory entry that
+   * was found. */
+  nextFile = LocalFile::validate(static_cast<const char *>(mapped)
+                             + nextDir->offset);
+  if (!nextFile) {
+    log("%s - Couldn't find the Local File header for %s", name, path);
+    return false;
+  }
+
+  /* Fill Stream info from Directory entry content */
+  const char *data = reinterpret_cast<const char *>(nextFile->GetData());
+  out->compressedBuf = data;
+  out->compressedSize = nextDir->compressedSize;
+  out->uncompressedSize = nextDir->uncompressedSize;
+  out->type = static_cast<Stream::Type>(uint16_t(nextDir->compression));
+
+  /* Store the next directory entry; the Local File fast path is disabled
+   * once the central directory has been read (see above). */
+  nextDir = nextDir->GetNext();
+  nextFile = NULL;
+  return true;
+}
+
+/* Locate (and cache) the first Central Directory entry. Returns NULL when
+ * the archive is not mapped or its central directory cannot be found. */
+const Zip::DirectoryEntry *
+Zip::GetFirstEntry() const
+{
+  if (entries || mapped == MAP_FAILED)
+    return entries; // entries is NULL in the second case above
+
+  const CentralDirectoryEnd *end = NULL;
+  const char *_end = static_cast<const char *>(mapped) + size
+                 - sizeof(CentralDirectoryEnd);
+
+  /* Scan for the Central Directory End, starting from the end of the file
+   * and moving backwards: the record may be followed by an archive
+   * comment of unknown length. */
+  for (; _end > mapped && !end; _end--)
+    end = CentralDirectoryEnd::validate(_end);
+  if (!end) {
+    log("%s - Couldn't find end of central directory record", name);
+    return NULL;
+  }
+
+  entries = DirectoryEntry::validate(static_cast<const char *>(mapped)
+                                 + end->offset);
+  if (!entries) {
+    log("%s - Couldn't find central directory record", name);
+  }
+  return entries;
+}
+
+/* Return a Zip for the given path, reusing an existing instance when one
+ * is already bookkept (linear search by file name). A newly created Zip
+ * is added to the list; it removes itself via Forget() on destruction. */
+mozilla::TemporaryRef<Zip>
+ZipCollection::GetZip(const char *path)
+{
+  /* Search the list of Zips we already have for a match */
+  for (std::vector<Zip *>::iterator it = zips.begin(); it < zips.end(); ++it) {
+    if (strcmp((*it)->GetName(), path) == 0)
+      return *it;
+  }
+  Zip *zip = new Zip(path, this);
+  zips.push_back(zip);
+  return zip;
+}
+
+/* Remove the given Zip from the bookkeeping list. Called from the Zip
+ * destructor, so the instance itself must not be used beyond its name. */
+void
+ZipCollection::Forget(Zip *zip)
+{
+  debug("ZipCollection::Forget(\"%s\")", zip->GetName());
+  std::vector<Zip *>::iterator it = std::find(zips.begin(), zips.end(), zip);
+  /* std::find returns end() when there is no match; the iterator must be
+   * checked against end() before dereferencing, otherwise a zip missing
+   * from the list leads to undefined behavior. */
+  if (it != zips.end())
+    zips.erase(it);
+  else
+    debug("ZipCollection::Forget: didn't find \"%s\" in bookkeeping", zip->GetName());
+}
diff --git a/sal/android/faulty.lib/linker/Zip.h b/sal/android/faulty.lib/linker/Zip.h
new file mode 100644
index 000000000000..0ebcfd894bf7
--- /dev/null
+++ b/sal/android/faulty.lib/linker/Zip.h
@@ -0,0 +1,336 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Zip_h
+#define Zip_h
+
+#include <cstring>
+#include <stdint.h>
+#include <vector>
+#include <zlib.h>
+#include "Utils.h"
+#include "mozilla/RefPtr.h"
+
+/**
+ * Forward declaration
+ */
+class ZipCollection;
+
+/**
+ * Class to handle access to Zip archive streams. The Zip archive is mapped
+ * in memory, and streams are direct references to that mapped memory.
+ * Zip files are assumed to be correctly formed. No boundary checks are
+ * performed, which means hand-crafted malicious Zip archives can make the
+ * code fail in bad ways. However, since the only intended use is to load
+ * libraries from Zip archives, there is no interest in making this code
+ * safe, since the libraries could contain malicious code anyways.
+ */
+class Zip: public mozilla::RefCounted<Zip>
+{
+public:
+  /**
+   * Create a Zip instance for the given file name. In case of error, the
+   * Zip instance is still created but methods will error out.
+   */
+  Zip(const char *filename, ZipCollection *collection = NULL);
+
+  /**
+   * Destructor
+   */
+  ~Zip();
+
+  /**
+   * Class used to access Zip archive item streams
+   */
+  class Stream
+  {
+  public:
+    /**
+     * Stream types
+     */
+    enum Type {
+      STORE = 0,
+      DEFLATE = 8
+    };
+
+    /**
+     * Constructor
+     */
+    Stream(): compressedBuf(NULL), compressedSize(0), uncompressedSize(0)
+            , type(STORE) { }
+
+    /**
+     * Getters
+     */
+    const void *GetBuffer() { return compressedBuf; }
+    size_t GetSize() { return compressedSize; }
+    size_t GetUncompressedSize() { return uncompressedSize; }
+    Type GetType() { return type; }
+
+    /**
+     * Returns a z_stream for use with inflate functions using the given
+     * buffer as inflate output. The caller is expected to allocate enough
+     * memory for the Stream uncompressed size.
+     */
+    z_stream GetZStream(void *buf)
+    {
+      z_stream zStream;
+      memset(&zStream, 0, sizeof(zStream));
+      zStream.avail_in = compressedSize;
+      zStream.next_in = reinterpret_cast<Bytef *>(
+                        const_cast<void *>(compressedBuf));
+      zStream.avail_out = uncompressedSize;
+      zStream.next_out = static_cast<Bytef *>(buf);
+      return zStream;
+    }
+
+  protected:
+    friend class Zip;
+    const void *compressedBuf;
+    size_t compressedSize;
+    size_t uncompressedSize;
+    Type type;
+  };
+
+  /**
+   * Returns a stream from the Zip archive.
+   */
+  bool GetStream(const char *path, Stream *out) const;
+
+  /**
+   * Returns the file name of the archive
+   */
+  const char *GetName() const
+  {
+    return name;
+  }
+
+private:
+  /* File name of the archive */
+  char *name;
+  /* Address where the Zip archive is mapped */
+  void *mapped;
+  /* Size of the archive */
+  size_t size;
+
+  /**
+   * Strings (file names, comments, etc.) in the Zip headers are NOT zero
+   * terminated. This class is a helper around them.
+   */
+  class StringBuf
+  {
+  public:
+    /**
+     * Constructor
+     */
+    StringBuf(const char *buf, size_t length): buf(buf), length(length) { }
+
+    /**
+     * Returns whether the string has the same content as the given zero
+     * terminated string.
+     */
+    bool Equals(const char *str) const
+    {
+      /* Require an exact match: str must also end where this buffer ends.
+       * With strncmp alone, any str merely having this buffer as a prefix
+       * would wrongly compare equal. */
+      return strncmp(str, buf, length) == 0 && str[length] == '\0';
+    }
+
+  private:
+    const char *buf;
+    size_t length;
+  };
+
+/* All the following types need to be packed */
+#pragma pack(1)
+public:
+  /**
+   * A Zip archive is an aggregate of entities which all start with a
+   * signature giving their type. This template is to be used as a base
+   * class for these entities.
+   */
+  template <typename T>
+  class SignedEntity
+  {
+  public:
+    /**
+     * Equivalent to reinterpret_cast<const T *>(buf), with an additional
+     * check of the signature.
+     */
+    static const T *validate(const void *buf)
+    {
+      const T *ret = static_cast<const T *>(buf);
+      if (ret->signature == T::magic)
+        return ret;
+      return NULL;
+    }
+
+    SignedEntity(uint32_t magic): signature(magic) { }
+  private:
+    le_uint32 signature;
+  };
+
+private:
+  /**
+   * Header used to describe a Local File entry. The header is followed by
+   * the file name and an extra field, then by the data stream.
+   */
+  struct LocalFile: public SignedEntity<LocalFile>
+  {
+    /* Signature for a Local File header */
+    static const uint32_t magic = 0x04034b50;
+
+    /**
+     * Returns the file name
+     */
+    StringBuf GetName() const
+    {
+      return StringBuf(reinterpret_cast<const char *>(this) + sizeof(*this),
+                       filenameSize);
+    }
+
+    /**
+     * Returns a pointer to the data associated with this header
+     */
+    const void *GetData() const
+    {
+      return reinterpret_cast<const char *>(this) + sizeof(*this)
+             + filenameSize + extraFieldSize;
+    }
+
+    le_uint16 minVersion;
+    le_uint16 generalFlag;
+    le_uint16 compression;
+    le_uint16 lastModifiedTime;
+    le_uint16 lastModifiedDate;
+    le_uint32 CRC32;
+    le_uint32 compressedSize;
+    le_uint32 uncompressedSize;
+    le_uint16 filenameSize;
+    le_uint16 extraFieldSize;
+  };
+
+  /**
+   * In some cases, when a zip archive is created, compressed size and CRC
+   * are not known when writing the Local File header. In these cases, the
+   * 3rd bit of the general flag in the Local File header is set, and there
+   * is an additional header following the compressed data.
+   */
+  struct DataDescriptor: public SignedEntity<DataDescriptor>
+  {
+    /* Signature for a Data Descriptor header */
+    static const uint32_t magic = 0x08074b50;
+
+    le_uint32 CRC32;
+    le_uint32 compressedSize;
+    le_uint32 uncompressedSize;
+  };
+
+  /**
+   * Header used to describe a Central Directory Entry. The header is
+   * followed by the file name, an extra field, and a comment.
+   */
+  struct DirectoryEntry: public SignedEntity<DirectoryEntry>
+  {
+    /* Signature for a Central Directory Entry header */
+    static const uint32_t magic = 0x02014b50;
+
+    /**
+     * Returns the file name
+     */
+    StringBuf GetName() const
+    {
+      return StringBuf(reinterpret_cast<const char *>(this) + sizeof(*this),
+                       filenameSize);
+    }
+
+    /**
+     * Returns the Central Directory Entry following this one.
+     */
+    const DirectoryEntry *GetNext() const
+    {
+      return validate(reinterpret_cast<const char *>(this) + sizeof(*this)
+                      + filenameSize + extraFieldSize + fileCommentSize);
+    }
+
+    le_uint16 creatorVersion;
+    le_uint16 minVersion;
+    le_uint16 generalFlag;
+    le_uint16 compression;
+    le_uint16 lastModifiedTime;
+    le_uint16 lastModifiedDate;
+    le_uint32 CRC32;
+    le_uint32 compressedSize;
+    le_uint32 uncompressedSize;
+    le_uint16 filenameSize;
+    le_uint16 extraFieldSize;
+    le_uint16 fileCommentSize;
+    le_uint16 diskNum;
+    le_uint16 internalAttributes;
+    le_uint32 externalAttributes;
+    le_uint32 offset;
+  };
+
+  /**
+   * Header used to describe the End of Central Directory Record.
+   */
+  struct CentralDirectoryEnd: public SignedEntity<CentralDirectoryEnd>
+  {
+    /* Signature for the End of Central Directory Record */
+    static const uint32_t magic = 0x06054b50;
+
+    le_uint16 diskNum;
+    le_uint16 startDisk;
+    le_uint16 recordsOnDisk;
+    le_uint16 records;
+    le_uint32 size;
+    le_uint32 offset;
+    le_uint16 commentSize;
+  };
+#pragma pack()
+
+  /**
+   * Returns the first Directory entry
+   */
+  const DirectoryEntry *GetFirstEntry() const;
+
+  /* Pointer to the Local File Entry following the last one GetStream() used.
+   * This is used by GetStream to avoid scanning the Directory Entries when the
+   * requested entry is that one. */
+  mutable const LocalFile *nextFile;
+
+  /* Likewise for the next Directory entry */
+  mutable const DirectoryEntry *nextDir;
+
+  /* Pointer to the Directory entries */
+  mutable const DirectoryEntry *entries;
+
+  /* ZipCollection containing this Zip */
+  mutable ZipCollection *parent;
+};
+
+/**
+ * Class for bookkeeping Zip instances
+ */
+class ZipCollection
+{
+public:
+  /**
+   * Get a Zip instance for the given path. If there is an existing one
+   * already, return that one, otherwise create a new one.
+   */
+  mozilla::TemporaryRef<Zip> GetZip(const char *path);
+
+protected:
+  /**
+   * Forget about the given Zip instance. This method is meant to be called
+   * by the Zip destructor.
+   */
+  friend Zip::~Zip();
+  void Forget(Zip *zip);
+
+private:
+  /* Zip instances bookkept in this collection. Raw pointers: lifetime is
+   * driven by the Zips' own refcounts; entries remove themselves via
+   * Forget() on destruction. */
+  std::vector<Zip *> zips;
+};
+
+#endif /* Zip_h */
diff --git a/sal/android/faulty.lib/mozilla/Assertions.h b/sal/android/faulty.lib/mozilla/Assertions.h
new file mode 100644
index 000000000000..407fb24e98d1
--- /dev/null
+++ b/sal/android/faulty.lib/mozilla/Assertions.h
@@ -0,0 +1,376 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of runtime and static assertion macros for C and C++. */
+
+#ifndef mozilla_Assertions_h_
+#define mozilla_Assertions_h_
+
+#include "mozilla/Attributes.h"
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef WIN32
+ /*
+ * TerminateProcess and GetCurrentProcess are defined in <winbase.h>, which
+ * further depends on <windef.h>. We hardcode these few definitions manually
+ * because those headers clutter the global namespace with a significant
+ * number of undesired macros and symbols.
+ */
+# ifdef __cplusplus
+ extern "C" {
+# endif
+ __declspec(dllimport) int __stdcall
+ TerminateProcess(void* hProcess, unsigned int uExitCode);
+ __declspec(dllimport) void* __stdcall GetCurrentProcess(void);
+# ifdef __cplusplus
+ }
+# endif
+#else
+# include <signal.h>
+#endif
+#ifdef ANDROID
+# include <android/log.h>
+#endif
+
+/*
+ * MOZ_STATIC_ASSERT may be used to assert a condition *at compile time*. This
+ * can be useful when you make certain assumptions about what must hold for
+ * optimal, or even correct, behavior. For example, you might assert that the
+ * size of a struct is a multiple of the target architecture's word size:
+ *
+ * struct S { ... };
+ * MOZ_STATIC_ASSERT(sizeof(S) % sizeof(size_t) == 0,
+ * "S should be a multiple of word size for efficiency");
+ *
+ * This macro can be used in any location where both an extern declaration and a
+ * typedef could be used.
+ *
+ * Be aware of the gcc 4.2 concerns noted further down when writing patches that
+ * use this macro, particularly if a patch only bounces on OS X.
+ */
+#ifdef __cplusplus
+# if defined(__clang__)
+# ifndef __has_extension
+# define __has_extension __has_feature /* compatibility, for older versions of clang */
+# endif
+# if __has_extension(cxx_static_assert)
+# define MOZ_STATIC_ASSERT(cond, reason) static_assert((cond), reason)
+# endif
+# elif defined(__GNUC__)
+# if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+# define MOZ_STATIC_ASSERT(cond, reason) static_assert((cond), reason)
+# endif
+# elif defined(_MSC_VER)
+# if _MSC_VER >= 1600 /* MSVC 10 */
+# define MOZ_STATIC_ASSERT(cond, reason) static_assert((cond), reason)
+# endif
+# elif defined(__HP_aCC)
+# if __HP_aCC >= 62500 && defined(_HP_CXX0x_SOURCE)
+# define MOZ_STATIC_ASSERT(cond, reason) static_assert((cond), reason)
+# endif
+# endif
+#endif
+#ifndef MOZ_STATIC_ASSERT
+# define MOZ_STATIC_ASSERT_GLUE1(x, y) x##y
+# define MOZ_STATIC_ASSERT_GLUE(x, y) MOZ_STATIC_ASSERT_GLUE1(x, y)
+# if defined(__SUNPRO_CC)
+ /*
+ * The Sun Studio C++ compiler is buggy when declaring, inside a function,
+ * another extern'd function with an array argument whose length contains a
+ * sizeof, triggering the error message "sizeof expression not accepted as
+ * size of array parameter". This bug (6688515, not public yet) would hit
+ * defining moz_static_assert as a function, so we always define an extern
+ * array for Sun Studio.
+ *
+ * We include the line number in the symbol name in a best-effort attempt
+ * to avoid conflicts (see below).
+ */
+# define MOZ_STATIC_ASSERT(cond, reason) \
+ extern char MOZ_STATIC_ASSERT_GLUE(moz_static_assert, __LINE__)[(cond) ? 1 : -1]
+# elif defined(__COUNTER__)
+ /*
+ * If there was no preferred alternative, use a compiler-agnostic version.
+ *
+ * Note that the non-__COUNTER__ version has a bug in C++: it can't be used
+ * in both |extern "C"| and normal C++ in the same translation unit. (Alas
+ * |extern "C"| isn't allowed in a function.) The only affected compiler
+ * we really care about is gcc 4.2. For that compiler and others like it,
+ * we include the line number in the function name to do the best we can to
+ * avoid conflicts. These should be rare: a conflict would require use of
+ * MOZ_STATIC_ASSERT on the same line in separate files in the same
+ * translation unit, *and* the uses would have to be in code with
+ * different linkage, *and* the first observed use must be in C++-linkage
+ * code.
+ */
+# define MOZ_STATIC_ASSERT(cond, reason) \
+ typedef int MOZ_STATIC_ASSERT_GLUE(moz_static_assert, __COUNTER__)[(cond) ? 1 : -1]
+# else
+# define MOZ_STATIC_ASSERT(cond, reason) \
+ extern void MOZ_STATIC_ASSERT_GLUE(moz_static_assert, __LINE__)(int arg[(cond) ? 1 : -1])
+# endif
+#endif
+
+#define MOZ_STATIC_ASSERT_IF(cond, expr, reason) MOZ_STATIC_ASSERT(!(cond) || (expr), reason)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * MOZ_CRASH crashes the program, plain and simple, in a Breakpad-compatible
+ * way, in both debug and release builds.
+ *
+ * MOZ_CRASH is a good solution for "handling" failure cases when you're
+ * unwilling or unable to handle them more cleanly -- for OOM, for likely memory
+ * corruption, and so on. It's also a good solution if you need safe behavior
+ * in release builds as well as debug builds. But if the failure is one that
+ * should be debugged and fixed, MOZ_ASSERT is generally preferable.
+ */
+#if defined(_MSC_VER)
+ /*
+ * On MSVC use the __debugbreak compiler intrinsic, which produces an inline
+ * (not nested in a system function) breakpoint. This distinctively invokes
+ * Breakpad without requiring system library symbols on all stack-processing
+ * machines, as a nested breakpoint would require. We use TerminateProcess
+ * with the exit code aborting would generate because we don't want to invoke
+ * atexit handlers, destructors, library unload handlers, and so on when our
+ * process might be in a compromised state. We don't use abort() because
+ * it'd cause Windows to annoyingly pop up the process error dialog multiple
+ * times. See bug 345118 and bug 426163.
+ *
+ * (Technically these are Windows requirements, not MSVC requirements. But
+ * practically you need MSVC for debugging, and we only ship builds created
+ * by MSVC, so doing it this way reduces complexity.)
+ */
+# ifdef __cplusplus
+# define MOZ_CRASH() \
+ do { \
+ __debugbreak(); \
+ *((volatile int*) NULL) = 123; \
+ ::TerminateProcess(::GetCurrentProcess(), 3); \
+ } while (0)
+# else
+# define MOZ_CRASH() \
+ do { \
+ __debugbreak(); \
+ *((volatile int*) NULL) = 123; \
+ TerminateProcess(GetCurrentProcess(), 3); \
+ } while (0)
+# endif
+#else
+# ifdef __cplusplus
+# define MOZ_CRASH() \
+ do { \
+ *((volatile int*) NULL) = 123; \
+ ::abort(); \
+ } while (0)
+# else
+# define MOZ_CRASH() \
+ do { \
+ *((volatile int*) NULL) = 123; \
+ abort(); \
+ } while (0)
+# endif
+#endif
+
/*
 * Prints |s| as an assertion failure (using file and ln as the location of the
 * assertion) to the standard debug-output channel.
 *
 * Usually you should use MOZ_ASSERT instead of this method. This method is
 * primarily for internal use in this header, and only secondarily for use in
 * implementing release-build assertions.
 */
static MOZ_ALWAYS_INLINE void
MOZ_ReportAssertionFailure(const char* s, const char* file, int ln)
{
#ifdef ANDROID
  /* On Android, stderr is typically not visible; send the message to the
   * system log at FATAL priority so it appears in logcat. */
  __android_log_print(ANDROID_LOG_FATAL, "MOZ_Assert",
                      "Assertion failure: %s, at %s:%d\n", s, file, ln);
#else
  fprintf(stderr, "Assertion failure: %s, at %s:%d\n", s, file, ln);
  /* Flush explicitly: the caller is about to crash the process, and an
   * unflushed stderr buffer would lose the message. */
  fflush(stderr);
#endif
}
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+/*
+ * MOZ_ASSERT(expr [, explanation-string]) asserts that |expr| must be truthy in
+ * debug builds. If it is, execution continues. Otherwise, an error message
+ * including the expression and the explanation-string (if provided) is printed,
+ * an attempt is made to invoke any existing debugger, and execution halts.
+ * MOZ_ASSERT is fatal: no recovery is possible. Do not assert a condition
+ * which can correctly be falsy.
+ *
+ * The optional explanation-string, if provided, must be a string literal
+ * explaining the assertion. It is intended for use with assertions whose
+ * correctness or rationale is non-obvious, and for assertions where the "real"
+ * condition being tested is best described prosaically. Don't provide an
+ * explanation if it's not actually helpful.
+ *
+ * // No explanation needed: pointer arguments often must not be NULL.
+ * MOZ_ASSERT(arg);
+ *
+ * // An explanation can be helpful to explain exactly how we know an
+ * // assertion is valid.
+ * MOZ_ASSERT(state == WAITING_FOR_RESPONSE,
+ * "given that <thingA> and <thingB>, we must have...");
+ *
+ * // Or it might disambiguate multiple identical (save for their location)
+ * // assertions of the same expression.
+ * MOZ_ASSERT(getSlot(PRIMITIVE_THIS_SLOT).isUndefined(),
+ * "we already set [[PrimitiveThis]] for this Boolean object");
+ * MOZ_ASSERT(getSlot(PRIMITIVE_THIS_SLOT).isUndefined(),
+ * "we already set [[PrimitiveThis]] for this String object");
+ *
+ * MOZ_ASSERT has no effect in non-debug builds. It is designed to catch bugs
+ * *only* during debugging, not "in the field".
+ */
+#ifdef DEBUG
+ /* First the single-argument form. */
+# define MOZ_ASSERT_HELPER1(expr) \
+ do { \
+ if (!(expr)) { \
+ MOZ_ReportAssertionFailure(#expr, __FILE__, __LINE__); \
+ MOZ_CRASH(); \
+ } \
+ } while (0)
+ /* Now the two-argument form. */
+# define MOZ_ASSERT_HELPER2(expr, explain) \
+ do { \
+ if (!(expr)) { \
+ MOZ_ReportAssertionFailure(#expr " (" explain ")", __FILE__, __LINE__); \
+ MOZ_CRASH(); \
+ } \
+ } while (0)
+ /* And now, helper macrology up the wazoo. */
+ /*
+ * Count the number of arguments passed to MOZ_ASSERT, very carefully
+ * tiptoeing around an MSVC bug where it improperly expands __VA_ARGS__ as a
+ * single token in argument lists. See these URLs for details:
+ *
+ * http://connect.microsoft.com/VisualStudio/feedback/details/380090/variadic-macro-replacement
+ * http://cplusplus.co.il/2010/07/17/variadic-macro-to-count-number-of-arguments/#comment-644
+ */
+# define MOZ_COUNT_ASSERT_ARGS_IMPL2(_1, _2, count, ...) \
+ count
+# define MOZ_COUNT_ASSERT_ARGS_IMPL(args) \
+ MOZ_COUNT_ASSERT_ARGS_IMPL2 args
+# define MOZ_COUNT_ASSERT_ARGS(...) \
+ MOZ_COUNT_ASSERT_ARGS_IMPL((__VA_ARGS__, 2, 1, 0))
+ /* Pick the right helper macro to invoke. */
+# define MOZ_ASSERT_CHOOSE_HELPER2(count) MOZ_ASSERT_HELPER##count
+# define MOZ_ASSERT_CHOOSE_HELPER1(count) MOZ_ASSERT_CHOOSE_HELPER2(count)
+# define MOZ_ASSERT_CHOOSE_HELPER(count) MOZ_ASSERT_CHOOSE_HELPER1(count)
+ /* The actual macro. */
+# define MOZ_ASSERT_GLUE(x, y) x y
+# define MOZ_ASSERT(...) \
+ MOZ_ASSERT_GLUE(MOZ_ASSERT_CHOOSE_HELPER(MOZ_COUNT_ASSERT_ARGS(__VA_ARGS__)), \
+ (__VA_ARGS__))
+#else
+# define MOZ_ASSERT(...) do { } while(0)
+#endif /* DEBUG */
+
+/*
+ * MOZ_ASSERT_IF(cond1, cond2) is equivalent to MOZ_ASSERT(cond2) if cond1 is
+ * true.
+ *
+ * MOZ_ASSERT_IF(isPrime(num), num == 2 || isOdd(num));
+ *
+ * As with MOZ_ASSERT, MOZ_ASSERT_IF has effect only in debug builds. It is
+ * designed to catch bugs during debugging, not "in the field".
+ */
+#ifdef DEBUG
+# define MOZ_ASSERT_IF(cond, expr) \
+ do { \
+ if (cond) \
+ MOZ_ASSERT(expr); \
+ } while (0)
+#else
+# define MOZ_ASSERT_IF(cond, expr) do { } while (0)
+#endif
+
+/*
+ * MOZ_NOT_REACHED_MARKER() expands to an expression which states that it is
+ * undefined behavior for execution to reach this point. No guarantees are made
+ * about what will happen if this is reached at runtime. Most code should
+ * probably use the higher level MOZ_NOT_REACHED, which uses this when
+ * appropriate.
+ */
+#if defined(__clang__)
+# define MOZ_NOT_REACHED_MARKER() __builtin_unreachable()
+#elif defined(__GNUC__)
+ /*
+ * __builtin_unreachable() was implemented in gcc 4.5. If we don't have
+ * that, call a noreturn function; abort() will do nicely. Qualify the call
+ * in C++ in case there's another abort() visible in local scope.
+ */
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define MOZ_NOT_REACHED_MARKER() __builtin_unreachable()
+# else
+# ifdef __cplusplus
+# define MOZ_NOT_REACHED_MARKER() ::abort()
+# else
+# define MOZ_NOT_REACHED_MARKER() abort()
+# endif
+# endif
+#elif defined(_MSC_VER)
+# define MOZ_NOT_REACHED_MARKER() __assume(0)
+#else
+# ifdef __cplusplus
+# define MOZ_NOT_REACHED_MARKER() ::abort()
+# else
+# define MOZ_NOT_REACHED_MARKER() abort()
+# endif
+#endif
+
+/*
+ * MOZ_NOT_REACHED(reason) indicates that the given point can't be reached
+ * during execution: simply reaching that point in execution is a bug. It takes
+ * as an argument an error message indicating the reason why that point should
+ * not have been reachable.
+ *
+ * // ...in a language parser...
+ * void handle(BooleanLiteralNode node)
+ * {
+ * if (node.isTrue())
+ * handleTrueLiteral();
+ * else if (node.isFalse())
+ * handleFalseLiteral();
+ * else
+ * MOZ_NOT_REACHED("boolean literal that's not true or false?");
+ * }
+ */
+#if defined(DEBUG)
+# define MOZ_NOT_REACHED(reason) \
+ do { \
+ MOZ_ASSERT(false, reason); \
+ MOZ_NOT_REACHED_MARKER(); \
+ } while (0)
+#else
+# define MOZ_NOT_REACHED(reason) MOZ_NOT_REACHED_MARKER()
+#endif
+
+/*
+ * MOZ_ALWAYS_TRUE(expr) and MOZ_ALWAYS_FALSE(expr) always evaluate the provided
+ * expression, in debug builds and in release builds both. Then, in debug
+ * builds only, the value of the expression is asserted either true or false
+ * using MOZ_ASSERT.
+ */
+#ifdef DEBUG
+# define MOZ_ALWAYS_TRUE(expr) MOZ_ASSERT((expr))
+# define MOZ_ALWAYS_FALSE(expr) MOZ_ASSERT(!(expr))
+#else
+# define MOZ_ALWAYS_TRUE(expr) ((void)(expr))
+# define MOZ_ALWAYS_FALSE(expr) ((void)(expr))
+#endif
+
+#endif /* mozilla_Assertions_h_ */
diff --git a/sal/android/faulty.lib/mozilla/Attributes.h b/sal/android/faulty.lib/mozilla/Attributes.h
new file mode 100644
index 000000000000..0cfcd60336b9
--- /dev/null
+++ b/sal/android/faulty.lib/mozilla/Attributes.h
@@ -0,0 +1,322 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Implementations of various class and method modifier attributes. */
+
+#ifndef mozilla_Attributes_h_
+#define mozilla_Attributes_h_
+
+/*
+ * This header does not include any other headers so that it can be included by
+ * code that is (only currently) mfbt-incompatible.
+ */
+
+/*
+ * MOZ_INLINE is a macro which expands to tell the compiler that the method
+ * decorated with it should be inlined. This macro is usable from C and C++
+ * code, even though C89 does not support the |inline| keyword. The compiler
+ * may ignore this directive if it chooses.
+ */
+#if defined(__cplusplus)
+# define MOZ_INLINE inline
+#elif defined(_MSC_VER)
+# define MOZ_INLINE __inline
+#elif defined(__GNUC__)
+# define MOZ_INLINE __inline__
+#else
+# define MOZ_INLINE inline
+#endif
+
+/*
+ * MOZ_ALWAYS_INLINE is a macro which expands to tell the compiler that the
+ * method decorated with it must be inlined, even if the compiler thinks
+ * otherwise. This is only a (much) stronger version of the MOZ_INLINE hint:
+ * compilers are not guaranteed to respect it (although they're much more likely
+ * to do so).
+ */
+#if defined(DEBUG)
+# define MOZ_ALWAYS_INLINE MOZ_INLINE
+#elif defined(_MSC_VER)
+# define MOZ_ALWAYS_INLINE __forceinline
+#elif defined(__GNUC__)
+# define MOZ_ALWAYS_INLINE __attribute__((always_inline)) MOZ_INLINE
+#else
+# define MOZ_ALWAYS_INLINE MOZ_INLINE
+#endif
+
+/*
+ * g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
+ * without warnings (functionality used by the macros below). These modes are
+ * detectable by checking whether __GXX_EXPERIMENTAL_CXX0X__ is defined or, more
+ * standardly, by checking whether __cplusplus has a C++11 or greater value.
+ * Current versions of g++ do not correctly set __cplusplus, so we check both
+ * for forward compatibility.
+ */
+#if defined(__clang__)
+ /*
+ * Per Clang documentation, "Note that marketing version numbers should not
+ * be used to check for language features, as different vendors use different
+ * numbering schemes. Instead, use the feature checking macros."
+ */
+# ifndef __has_extension
+# define __has_extension __has_feature /* compatibility, for older versions of clang */
+# endif
+# if __has_extension(cxx_deleted_functions)
+# define MOZ_HAVE_CXX11_DELETE
+# endif
+# if __has_extension(cxx_override_control)
+# define MOZ_HAVE_CXX11_OVERRIDE
+# define MOZ_HAVE_CXX11_FINAL final
+# endif
+# if __has_attribute(noinline)
+# define MOZ_HAVE_NEVER_INLINE __attribute__((noinline))
+# endif
+# if __has_attribute(noreturn)
+# define MOZ_HAVE_NORETURN __attribute__((noreturn))
+# endif
+#elif defined(__GNUC__)
+# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+# if __GNUC__ > 4
+# define MOZ_HAVE_CXX11_DELETE
+# define MOZ_HAVE_CXX11_OVERRIDE
+# define MOZ_HAVE_CXX11_FINAL final
+# elif __GNUC__ == 4
+# if __GNUC_MINOR__ >= 7
+# define MOZ_HAVE_CXX11_OVERRIDE
+# define MOZ_HAVE_CXX11_FINAL final
+# endif
+# if __GNUC_MINOR__ >= 4
+# define MOZ_HAVE_CXX11_DELETE
+# endif
+# endif
+# else
+ /* __final is a non-C++11 GCC synonym for 'final', per GCC r176655. */
+# if __GNUC__ > 4
+# define MOZ_HAVE_CXX11_FINAL __final
+# elif __GNUC__ == 4
+# if __GNUC_MINOR__ >= 7
+# define MOZ_HAVE_CXX11_FINAL __final
+# endif
+# endif
+# endif
+# define MOZ_HAVE_NEVER_INLINE __attribute__((noinline))
+# define MOZ_HAVE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+# if _MSC_VER >= 1400
+# define MOZ_HAVE_CXX11_OVERRIDE
+ /* MSVC currently spells "final" as "sealed". */
+# define MOZ_HAVE_CXX11_FINAL sealed
+# endif
+# define MOZ_HAVE_NEVER_INLINE __declspec(noinline)
+# define MOZ_HAVE_NORETURN __declspec(noreturn)
+#endif
+
+/*
+ * MOZ_NEVER_INLINE is a macro which expands to tell the compiler that the
+ * method decorated with it must never be inlined, even if the compiler would
+ * otherwise choose to inline the method. Compilers aren't absolutely
+ * guaranteed to support this, but most do.
+ */
+#if defined(MOZ_HAVE_NEVER_INLINE)
+# define MOZ_NEVER_INLINE MOZ_HAVE_NEVER_INLINE
+#else
+# define MOZ_NEVER_INLINE /* no support */
+#endif
+
+/*
+ * MOZ_NORETURN, specified at the start of a function declaration, indicates
+ * that the given function does not return. (The function definition does not
+ * need to be annotated.)
+ *
+ * MOZ_NORETURN void abort(const char* msg);
+ *
+ * This modifier permits the compiler to optimize code assuming a call to such a
+ * function will never return. It also enables the compiler to avoid spurious
+ * warnings about not initializing variables, or about any other seemingly-dodgy
+ * operations performed after the function returns.
+ *
+ * This modifier does not affect the corresponding function's linking behavior.
+ */
+#if defined(MOZ_HAVE_NORETURN)
+# define MOZ_NORETURN MOZ_HAVE_NORETURN
+#else
+# define MOZ_NORETURN /* no support */
+#endif
+
+/*
+ * MOZ_ASAN_BLACKLIST is a macro to tell AddressSanitizer (a compile-time
+ * instrumentation shipped with Clang) to not instrument the annotated function.
+ * Furthermore, it will prevent the compiler from inlining the function because
+ * inlining currently breaks the blacklisting mechanism of AddressSanitizer.
+ */
+#if defined(MOZ_ASAN)
+# define MOZ_ASAN_BLACKLIST MOZ_NEVER_INLINE __attribute__((no_address_safety_analysis))
+# else
+# define MOZ_ASAN_BLACKLIST
+#endif
+
+
+#ifdef __cplusplus
+
+/*
+ * MOZ_DELETE, specified immediately prior to the ';' terminating an undefined-
+ * method declaration, attempts to delete that method from the corresponding
+ * class. An attempt to use the method will always produce an error *at compile
+ * time* (instead of sometimes as late as link time) when this macro can be
+ * implemented. For example, you can use MOZ_DELETE to produce classes with no
+ * implicit copy constructor or assignment operator:
+ *
+ * struct NonCopyable
+ * {
+ * private:
+ * NonCopyable(const NonCopyable& other) MOZ_DELETE;
+ * void operator=(const NonCopyable& other) MOZ_DELETE;
+ * };
+ *
+ * If MOZ_DELETE can't be implemented for the current compiler, use of the
+ * annotated method will still cause an error, but the error might occur at link
+ * time in some cases rather than at compile time.
+ *
+ * MOZ_DELETE relies on C++11 functionality not universally implemented. As a
+ * backstop, method declarations using MOZ_DELETE should be private.
+ */
+#if defined(MOZ_HAVE_CXX11_DELETE)
+# define MOZ_DELETE = delete
+#else
+# define MOZ_DELETE /* no support */
+#endif
+
+/*
+ * MOZ_OVERRIDE explicitly indicates that a virtual member function in a class
+ * overrides a member function of a base class, rather than potentially being a
+ * new member function. MOZ_OVERRIDE should be placed immediately before the
+ * ';' terminating the member function's declaration, or before '= 0;' if the
+ * member function is pure. If the member function is defined in the class
+ * definition, it should appear before the opening brace of the function body.
+ *
+ * class Base
+ * {
+ * public:
+ * virtual void f() = 0;
+ * };
+ * class Derived1 : public Base
+ * {
+ * public:
+ * virtual void f() MOZ_OVERRIDE;
+ * };
+ * class Derived2 : public Base
+ * {
+ * public:
+ * virtual void f() MOZ_OVERRIDE = 0;
+ * };
+ * class Derived3 : public Base
+ * {
+ * public:
+ * virtual void f() MOZ_OVERRIDE { }
+ * };
+ *
+ * In compilers supporting C++11 override controls, MOZ_OVERRIDE *requires* that
+ * the function marked with it override a member function of a base class: it
+ * is a compile error if it does not. Otherwise MOZ_OVERRIDE does not affect
+ * semantics and merely documents the override relationship to the reader (but
+ * of course must still be used correctly to not break C++11 compilers).
+ */
+#if defined(MOZ_HAVE_CXX11_OVERRIDE)
+# define MOZ_OVERRIDE override
+#else
+# define MOZ_OVERRIDE /* no support */
+#endif
+
+/*
+ * MOZ_FINAL indicates that some functionality cannot be overridden through
+ * inheritance. It can be used to annotate either classes/structs or virtual
+ * member functions.
+ *
+ * To annotate a class/struct with MOZ_FINAL, place MOZ_FINAL immediately after
+ * the name of the class, before the list of classes from which it derives (if
+ * any) and before its opening brace. MOZ_FINAL must not be used to annotate
+ * unnamed classes or structs. (With some compilers, and with C++11 proper, the
+ * underlying expansion is ambiguous with specifying a class name.)
+ *
+ * class Base MOZ_FINAL
+ * {
+ * public:
+ * Base();
+ * ~Base();
+ * virtual void f() { }
+ * };
+ * // This will be an error in some compilers:
+ * class Derived : public Base
+ * {
+ * public:
+ * ~Derived() { }
+ * };
+ *
+ * One particularly common reason to specify MOZ_FINAL upon a class is to tell
+ * the compiler that it's not dangerous for it to have a non-virtual destructor
+ * yet have one or more virtual functions, silencing the warning it might emit
+ * in this case. Suppose Base above weren't annotated with MOZ_FINAL. Because
+ * ~Base() is non-virtual, an attempt to delete a Derived* through a Base*
+ * wouldn't call ~Derived(), so any cleanup ~Derived() might do wouldn't happen.
+ * (Formally C++ says behavior is undefined, but compilers will likely just call
+ * ~Base() and not ~Derived().) Specifying MOZ_FINAL tells the compiler that
+ * it's safe for the destructor to be non-virtual.
+ *
+ * In compilers implementing final controls, it is an error to inherit from a
+ * class annotated with MOZ_FINAL. In other compilers it serves only as
+ * documentation.
+ *
+ * To annotate a virtual member function with MOZ_FINAL, place MOZ_FINAL
+ * immediately before the ';' terminating the member function's declaration, or
+ * before '= 0;' if the member function is pure. If the member function is
+ * defined in the class definition, it should appear before the opening brace of
+ * the function body. (This placement is identical to that for MOZ_OVERRIDE.
+ * If both are used, they should appear in the order 'MOZ_FINAL MOZ_OVERRIDE'
+ * for consistency.)
+ *
+ * class Base
+ * {
+ * public:
+ * virtual void f() MOZ_FINAL;
+ * };
+ * class Derived
+ * {
+ * public:
+ * // This will be an error in some compilers:
+ * virtual void f();
+ * };
+ *
+ * In compilers implementing final controls, it is an error for a derived class
+ * to override a method annotated with MOZ_FINAL. In other compilers it serves
+ * only as documentation.
+ */
+#if defined(MOZ_HAVE_CXX11_FINAL)
+# define MOZ_FINAL MOZ_HAVE_CXX11_FINAL
+#else
+# define MOZ_FINAL /* no support */
+#endif
+
+/**
+ * MOZ_WARN_UNUSED_RESULT tells the compiler to emit a warning if a function's
+ * return value is not used by the caller.
+ *
+ * Place this attribute at the very beginning of a function definition. For
+ * example, write
+ *
+ * MOZ_WARN_UNUSED_RESULT int foo();
+ *
+ * or
+ *
+ * MOZ_WARN_UNUSED_RESULT int foo() { return 42; }
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define MOZ_WARN_UNUSED_RESULT __attribute__ ((warn_unused_result))
+#else
+# define MOZ_WARN_UNUSED_RESULT
+#endif
+
+#endif /* __cplusplus */
+
+#endif /* mozilla_Attributes_h_ */
diff --git a/sal/android/faulty.lib/mozilla/RefPtr.h b/sal/android/faulty.lib/mozilla/RefPtr.h
new file mode 100644
index 000000000000..15ace62ef8b0
--- /dev/null
+++ b/sal/android/faulty.lib/mozilla/RefPtr.h
@@ -0,0 +1,406 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Helpers for defining and using refcounted objects. */
+
+#ifndef mozilla_RefPtr_h_
+#define mozilla_RefPtr_h_
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+template<typename T> class RefCounted;
+template<typename T> class RefPtr;
+template<typename T> class TemporaryRef;
+template<typename T> class OutParamRef;
+template<typename T> OutParamRef<T> byRef(RefPtr<T>&);
+
/**
 * RefCounted<T> is a sort of a "mixin" for a class T. RefCounted
 * manages, well, refcounting for T, and because RefCounted is
 * parameterized on T, RefCounted<T> can call T's destructor directly.
 * This means T doesn't need to have a virtual dtor and so doesn't
 * need a vtable.
 *
 * RefCounted<T> is created with refcount == 0. Newly-allocated
 * RefCounted<T> must immediately be assigned to a RefPtr to make the
 * refcount > 0. It's an error to allocate and free a bare
 * RefCounted<T>, i.e. outside of the RefPtr machinery. Attempts to
 * do so will abort DEBUG builds.
 *
 * Live RefCounted<T> have refcount > 0. The lifetime (refcounts) of
 * live RefCounted<T> are controlled by RefPtr<T> and
 * RefPtr<super/subclass of T>. Upon a transition from refcount == 1
 * to 0, the RefCounted<T> "dies" and is destroyed. The "destroyed"
 * state is represented in DEBUG builds by refcount==-0xdead. This
 * state distinguishes use-before-ref (refcount==0) from
 * use-after-destroy (refcount==-0xdead).
 */
template<typename T>
class RefCounted
{
  friend class RefPtr<T>;

  public:
    RefCounted() : refCnt(0) { }
    /* Only Release() sets the -0xdead sentinel (DEBUG builds), so this
     * assertion fires if the object is destroyed any other way: on the
     * stack, via manual delete, or while references are still live. */
    ~RefCounted() { MOZ_ASSERT(refCnt == -0xdead); }

    // Compatibility with nsRefPtr.
    void AddRef() {
      /* A negative count means use-after-destroy (-0xdead sentinel). */
      MOZ_ASSERT(refCnt >= 0);
      ++refCnt;
    }

    void Release() {
      MOZ_ASSERT(refCnt > 0);
      if (0 == --refCnt) {
#ifdef DEBUG
        /* Mark as destroyed so the dtor assertion and AddRef/Release
         * assertions can detect use-after-destroy. */
        refCnt = -0xdead;
#endif
        /* T is the most-derived type (CRTP), so a plain delete through
         * T* is correct without a virtual destructor. */
        delete static_cast<T*>(this);
      }
    }

    // Compatibility with wtf::RefPtr.
    void ref() { AddRef(); }
    void deref() { Release(); }
    int refCount() const { return refCnt; }
    bool hasOneRef() const {
      MOZ_ASSERT(refCnt > 0);
      return refCnt == 1;
    }

  private:
    int refCnt;
};
+
/**
 * RefPtr points to a refcounted thing that has AddRef and Release
 * methods to increase/decrease the refcount, respectively. After a
 * RefPtr<T> is assigned a T*, the T* can be used through the RefPtr
 * as if it were a T*.
 *
 * A RefPtr can forget its underlying T*, which results in the T*
 * being wrapped in a temporary object until the T* is either
 * re-adopted from or released by the temporary.
 */
template<typename T>
class RefPtr
{
  // To allow them to use unref()
  friend class TemporaryRef<T>;
  friend class OutParamRef<T>;

  /* Tag type: selects the TemporaryRef constructor that adopts a pointer
   * without touching its refcount (used by forget()). */
  struct DontRef {};

  public:
    RefPtr() : ptr(0) { }
    RefPtr(const RefPtr& o) : ptr(ref(o.ptr)) {}
    /* Adopts the reference held by the temporary (drop() transfers it),
     * so no extra AddRef is performed. */
    RefPtr(const TemporaryRef<T>& o) : ptr(o.drop()) {}
    RefPtr(T* t) : ptr(ref(t)) {}

    template<typename U>
    RefPtr(const RefPtr<U>& o) : ptr(ref(o.get())) {}

    ~RefPtr() { unref(ptr); }

    /* NOTE: in all assignment operators below the new pointer is ref'd
     * *before* assign() unrefs the old one, which makes self-assignment
     * (and assignment from our own pointee) safe. */
    RefPtr& operator=(const RefPtr& o) {
      assign(ref(o.ptr));
      return *this;
    }
    RefPtr& operator=(const TemporaryRef<T>& o) {
      assign(o.drop());
      return *this;
    }
    RefPtr& operator=(T* t) {
      assign(ref(t));
      return *this;
    }

    template<typename U>
    RefPtr& operator=(const RefPtr<U>& o) {
      assign(ref(o.get()));
      return *this;
    }

    /* Relinquish ownership: the returned TemporaryRef carries our
     * reference (DontRef: no extra AddRef) and this RefPtr becomes null. */
    TemporaryRef<T> forget() {
      T* tmp = ptr;
      ptr = 0;
      return TemporaryRef<T>(tmp, DontRef());
    }

    T* get() const { return ptr; }
    operator T*() const { return ptr; }
    T* operator->() const { return ptr; }
    T& operator*() const { return *ptr; }
    template<typename U>
    operator TemporaryRef<U>() { return TemporaryRef<U>(ptr); }

  private:
    /* Takes ownership of an already-ref'd pointer; callers must AddRef
     * (via ref()) before calling so self-assignment cannot destroy t. */
    void assign(T* t) {
      unref(ptr);
      ptr = t;
    }

    T* ptr;

    /* null-safe AddRef; returns t for use in initializer lists. */
    static MOZ_ALWAYS_INLINE T* ref(T* t) {
      if (t)
        t->AddRef();
      return t;
    }

    /* null-safe Release. */
    static MOZ_ALWAYS_INLINE void unref(T* t) {
      if (t)
        t->Release();
    }
};
+
/**
 * TemporaryRef<T> represents an object that holds a temporary
 * reference to a T. TemporaryRef objects can't be manually ref'd or
 * unref'd (being temporaries, not lvalues), so can only relinquish
 * references to other objects, or unref on destruction.
 */
template<typename T>
class TemporaryRef
{
  // To allow it to construct TemporaryRef from a bare T*
  friend class RefPtr<T>;

  typedef typename RefPtr<T>::DontRef DontRef;

  public:
    TemporaryRef(T* t) : ptr(RefPtr<T>::ref(t)) {}
    /* "Copying" actually steals the reference from the source (ptr is
     * mutable, see below) — move semantics emulated pre-C++11, in the
     * style of auto_ptr. The source is left null. */
    TemporaryRef(const TemporaryRef& o) : ptr(o.drop()) {}

    template<typename U>
    TemporaryRef(const TemporaryRef<U>& o) : ptr(o.drop()) {}

    /* Releases the reference if nobody adopted it via drop(). */
    ~TemporaryRef() { RefPtr<T>::unref(ptr); }

    /* Transfers the held reference to the caller without touching the
     * refcount; this TemporaryRef becomes null. */
    T* drop() const {
      T* tmp = ptr;
      ptr = 0;
      return tmp;
    }

  private:
    /* Adopt an already-ref'd pointer (no AddRef); used by RefPtr::forget. */
    TemporaryRef(T* t, const DontRef&) : ptr(t) {}

    /* mutable so drop() can null out a const (temporary) object. */
    mutable T* ptr;

    TemporaryRef() MOZ_DELETE;
    void operator=(const TemporaryRef&) MOZ_DELETE;
};
+
/**
 * OutParamRef is a wrapper that tracks a refcounted pointer passed as
 * an outparam argument to a function. OutParamRef implements COM T**
 * outparam semantics: this requires the callee to AddRef() the T*
 * returned through the T** outparam on behalf of the caller. This
 * means the caller (through OutParamRef) must Release() the old
 * object contained in the tracked RefPtr. It's OK if the callee
 * returns the same T* passed to it through the T** outparam, as long
 * as the callee obeys the COM discipline.
 *
 * Prefer returning TemporaryRef<T> from functions over creating T**
 * outparams and passing OutParamRef<T> to T**. Prefer RefPtr<T>*
 * outparams over T** outparams.
 */
template<typename T>
class OutParamRef
{
  friend OutParamRef byRef<T>(RefPtr<T>&);

  public:
    /* Runs after the callee returns: releases the RefPtr's old value and
     * adopts whatever the callee wrote into tmp (already AddRef'd by the
     * callee, per the COM discipline). */
    ~OutParamRef() {
      RefPtr<T>::unref(refPtr.ptr);
      refPtr.ptr = tmp;
    }

    /* Implicit conversion lets an OutParamRef be passed directly where
     * the callee expects a T**. */
    operator T**() { return &tmp; }

  private:
    /* tmp starts as the current value so a callee that leaves the
     * outparam untouched (but AddRefs it) behaves correctly. */
    OutParamRef(RefPtr<T>& p) : refPtr(p), tmp(p.get()) {}

    RefPtr<T>& refPtr;
    T* tmp;

    OutParamRef() MOZ_DELETE;
    OutParamRef& operator=(const OutParamRef&) MOZ_DELETE;
};
+
/**
 * byRef cooperates with OutParamRef to implement COM outparam semantics:
 * f(byRef(p)) wraps RefPtr p so the callee's T** write is adopted when
 * the temporary OutParamRef is destroyed at the end of the full
 * expression.
 *
 * NOTE(review): this relies on the returned OutParamRef not being
 * copied (RVO/copy elision); a real copy would run ~OutParamRef twice
 * and unref once too many — confirm acceptable on target compilers.
 */
template<typename T>
OutParamRef<T>
byRef(RefPtr<T>& ptr)
{
  return OutParamRef<T>(ptr);
}
+
+} // namespace mozilla
+
+#endif // mozilla_RefPtr_h_
+
+
+#if 0
+
+// Command line that builds these tests
+//
+// cp RefPtr.h test.cc && g++ -g -Wall -pedantic -DDEBUG -o test test.cc && ./test
+
+using namespace mozilla;
+
// Test fixture: a refcounted object that records its destruction, so
// the tests below can assert exact destruction counts.
struct Foo : public RefCounted<Foo>
{
  Foo() : dead(false) { }
  // Asserts against double-destruction, then bumps the global tally.
  ~Foo() {
    MOZ_ASSERT(!dead);
    dead = true;
    numDestroyed++;
  }

  bool dead;
  static int numDestroyed;
};
int Foo::numDestroyed;
+
+struct Bar : public Foo { };
+
// Returns a fresh Foo, deliberately routing it through an intermediate
// RefPtr to exercise the RefPtr -> TemporaryRef return conversion.
TemporaryRef<Foo>
NewFoo()
{
  return RefPtr<Foo>(new Foo());
}
+
// Returns a fresh Bar through a bare pointer, exercising the
// TemporaryRef(T*) constructor and the derived-to-base conversion.
TemporaryRef<Foo>
NewBar()
{
  return new Bar();
}
+
+void
+GetNewFoo(Foo** f)
+{
+ *f = new Bar();
+ // Kids, don't try this at home
+ (*f)->AddRef();
+}
+
+void
+GetPassedFoo(Foo** f)
+{
+ // Kids, don't try this at home
+ (*f)->AddRef();
+}
+
+void
+GetNewFoo(RefPtr<Foo>* f)
+{
+ *f = new Bar();
+}
+
// RefPtr<Foo>* outparam flavor that keeps the object it was passed:
// RefPtr owns the refcounting, so the body is intentionally empty.
void
GetPassedFoo(RefPtr<Foo>* f)
{}
+
// Returns a null TemporaryRef — relies on RefPtr<Foo>::ref/unref
// tolerating a null pointer (declared above this chunk; confirm).
TemporaryRef<Foo>
GetNullFoo()
{
  return 0;
}
+
// Self-test driver: each scope below creates references in a specific
// pattern and asserts the exact cumulative Foo::numDestroyed count at
// scope boundaries. The sequence is order-critical.
int
main(int argc, char** argv)
{
  // This should blow up
// Foo* f = new Foo(); delete f;

  MOZ_ASSERT(0 == Foo::numDestroyed);
  // Basic lifetime: a single owner destroys the object at scope exit.
  {
    RefPtr<Foo> f = new Foo();
    MOZ_ASSERT(f->refCount() == 1);
  }
  MOZ_ASSERT(1 == Foo::numDestroyed);

  // TemporaryRef returns: both init forms transfer ownership exactly
  // once; both Foos die only when the RefPtrs go out of scope.
  {
    RefPtr<Foo> f1 = NewFoo();
    RefPtr<Foo> f2(NewFoo());
    MOZ_ASSERT(1 == Foo::numDestroyed);
  }
  MOZ_ASSERT(3 == Foo::numDestroyed);

  // Derived-to-base transfer via TemporaryRef<Foo> holding a Bar.
  {
    RefPtr<Foo> b = NewBar();
    MOZ_ASSERT(3 == Foo::numDestroyed);
  }
  MOZ_ASSERT(4 == Foo::numDestroyed);

  // Shared ownership: three RefPtrs to one object; it survives the
  // inner scope and dies only when the last owner (f1) does.
  {
    RefPtr<Foo> f1;
    {
      f1 = new Foo();
      RefPtr<Foo> f2(f1);
      RefPtr<Foo> f3 = f2;
      MOZ_ASSERT(4 == Foo::numDestroyed);
    }
    MOZ_ASSERT(4 == Foo::numDestroyed);
  }
  MOZ_ASSERT(5 == Foo::numDestroyed);

  // forget() hands the reference to a TemporaryRef that is dropped
  // immediately, destroying the object before scope exit.
  {
    RefPtr<Foo> f = new Foo();
    f.forget();
    MOZ_ASSERT(6 == Foo::numDestroyed);
  }

  // byRef + T** outparam replacing the object: old Foo is released by
  // OutParamRef's destructor (7); the new Bar dies at scope exit (8).
  {
    RefPtr<Foo> f = new Foo();
    GetNewFoo(byRef(f));
    MOZ_ASSERT(7 == Foo::numDestroyed);
  }
  MOZ_ASSERT(8 == Foo::numDestroyed);

  // byRef + T** outparam keeping the same object: the callee's AddRef
  // balances OutParamRef's unref, so nothing dies until scope exit.
  {
    RefPtr<Foo> f = new Foo();
    GetPassedFoo(byRef(f));
    MOZ_ASSERT(8 == Foo::numDestroyed);
  }
  MOZ_ASSERT(9 == Foo::numDestroyed);

  // RefPtr<Foo>* outparam replacing the object: the assignment inside
  // the callee releases the old Foo (10); the Bar dies at exit (11).
  {
    RefPtr<Foo> f = new Foo();
    GetNewFoo(&f);
    MOZ_ASSERT(10 == Foo::numDestroyed);
  }
  MOZ_ASSERT(11 == Foo::numDestroyed);

  // RefPtr<Foo>* outparam keeping the object: no refcount traffic at
  // all; the Foo dies at scope exit.
  {
    RefPtr<Foo> f = new Foo();
    GetPassedFoo(&f);
    MOZ_ASSERT(11 == Foo::numDestroyed);
  }
  MOZ_ASSERT(12 == Foo::numDestroyed);

  // Derived-to-base via direct construction from a bare Bar*.
  {
    RefPtr<Foo> f1 = new Bar();
  }
  MOZ_ASSERT(13 == Foo::numDestroyed);

  // Null TemporaryRef: nothing is created, nothing is destroyed.
  {
    RefPtr<Foo> f = GetNullFoo();
    MOZ_ASSERT(13 == Foo::numDestroyed);
  }
  MOZ_ASSERT(13 == Foo::numDestroyed);

  return 0;
}
+
+#endif